-rw-r--r--.clang-format13
-rw-r--r--.mailmap6
-rw-r--r--Documentation/ABI/obsolete/sysfs-class-dax2
-rw-r--r--Documentation/ABI/obsolete/sysfs-kernel-fadump_registered2
-rw-r--r--Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem2
-rw-r--r--Documentation/ABI/removed/sysfs-bus-nfit2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-nfit40
-rw-r--r--Documentation/ABI/testing/sysfs-bus-papr-pmem4
-rw-r--r--Documentation/ABI/testing/sysfs-module4
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst26
-rw-r--r--Documentation/block/data-integrity.rst2
-rw-r--r--Documentation/cdrom/cdrom-standard.rst30
-rw-r--r--Documentation/devicetree/bindings/clock/idt,versaclock5.yaml2
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.yaml15
-rw-r--r--Documentation/devicetree/bindings/hwmon/lm75.yaml1
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mpc.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/input.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml1
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6328.txt4
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6358.txt2
-rw-r--r--Documentation/devicetree/bindings/media/renesas,drif.yaml24
-rw-r--r--Documentation/devicetree/bindings/mfd/mt6397.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-controller.yaml25
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,mmcif.txt53
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml135
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml10
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-am654.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipa.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ether.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/mtk-efuse.txt1
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/max8893.yaml88
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml385
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml17
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml5
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.yaml82
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml61
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml89
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt11
-rw-r--r--Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-cadence.txt30
-rw-r--r--Documentation/devicetree/bindings/spi/spi-cadence.yaml66
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml7
-rw-r--r--Documentation/devicetree/bindings/spi/spi-mux.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rockchip.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-xilinx.txt23
-rw-r--r--Documentation/devicetree/bindings/spi/spi-xilinx.yaml57
-rw-r--r--Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt25
-rw-r--r--Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml51
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/driver-api/nvdimm/nvdimm.rst2
-rw-r--r--Documentation/driver-api/serial/index.rst1
-rw-r--r--Documentation/driver-api/thermal/sysfs-api.rst24
-rw-r--r--Documentation/driver-api/usb/usb.rst17
-rw-r--r--Documentation/filesystems/erofs.rst175
-rw-r--r--Documentation/hwmon/adm1177.rst3
-rw-r--r--Documentation/hwmon/dps920ab.rst73
-rw-r--r--Documentation/hwmon/index.rst4
-rw-r--r--Documentation/hwmon/ir36021.rst2
-rw-r--r--Documentation/hwmon/lm75.rst6
-rw-r--r--Documentation/hwmon/ltc2992.rst2
-rw-r--r--Documentation/hwmon/max31790.rst5
-rw-r--r--Documentation/hwmon/mp2888.rst113
-rw-r--r--Documentation/hwmon/pim4328.rst105
-rw-r--r--Documentation/hwmon/pm6764tr.rst2
-rw-r--r--Documentation/hwmon/pmbus-core.rst42
-rw-r--r--Documentation/hwmon/pmbus.rst11
-rw-r--r--Documentation/hwmon/sht4x.rst45
-rw-r--r--Documentation/hwmon/tmp103.rst4
-rw-r--r--Documentation/hwmon/zl6100.rst132
-rw-r--r--Documentation/locking/lockdep-design.rst4
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/i40e.rst4
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/iavf.rst2
-rw-r--r--Documentation/powerpc/syscall64-abi.rst10
-rw-r--r--Documentation/process/kernel-enforcement-statement.rst2
-rw-r--r--Documentation/riscv/vm-layout.rst4
-rw-r--r--Documentation/security/tpm/xen-tpmfront.rst2
-rw-r--r--Documentation/spi/pxa2xx.rst58
-rw-r--r--Documentation/timers/no_hz.rst2
-rw-r--r--Documentation/translations/zh_CN/SecurityBugs50
-rw-r--r--Documentation/usb/gadget_configfs.rst2
-rw-r--r--Documentation/usb/mtouchusb.rst2
-rw-r--r--Documentation/usb/usb-serial.rst2
-rw-r--r--Documentation/userspace-api/ioctl/hdio.rst799
-rw-r--r--Documentation/userspace-api/seccomp_filter.rst16
-rw-r--r--Documentation/virt/kvm/amd-memory-encryption.rst2
-rw-r--r--Documentation/virt/kvm/mmu.rst4
-rw-r--r--Documentation/virt/kvm/vcpu-requests.rst8
-rw-r--r--Documentation/vm/slub.rst10
-rw-r--r--Documentation/x86/amd-memory-encryption.rst6
-rw-r--r--MAINTAINERS120
-rw-r--r--Makefile12
-rw-r--r--arch/alpha/configs/defconfig13
-rw-r--r--arch/alpha/include/asm/atomic.h88
-rw-r--r--arch/alpha/include/asm/cmpxchg.h12
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/include/asm/atomic.h60
-rw-r--r--arch/arc/include/asm/cmpxchg.h14
-rw-r--r--arch/arc/include/asm/page.h12
-rw-r--r--arch/arc/include/asm/pgtable.h12
-rw-r--r--arch/arc/include/uapi/asm/page.h1
-rw-r--r--arch/arc/include/uapi/asm/sigcontext.h1
-rw-r--r--arch/arc/kernel/entry.S4
-rw-r--r--arch/arc/kernel/kgdb.c1
-rw-r--r--arch/arc/kernel/process.c8
-rw-r--r--arch/arc/kernel/signal.c47
-rw-r--r--arch/arc/kernel/vmlinux.lds.S2
-rw-r--r--arch/arc/mm/init.c11
-rw-r--r--arch/arc/mm/ioremap.c5
-rw-r--r--arch/arc/mm/tlb.c2
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-common.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6q-dhcom-som.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7d-meerkat96.dts2
-rw-r--r--arch/arm/boot/dts/imx7d-pico.dtsi2
-rw-r--r--arch/arm/configs/footbridge_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/include/asm/atomic.h96
-rw-r--r--arch/arm/include/asm/cmpxchg.h20
-rw-r--r--arch/arm/include/asm/cpuidle.h5
-rw-r--r--arch/arm/include/asm/sync_bitops.h2
-rw-r--r--arch/arm/kernel/setup.c16
-rw-r--r--arch/arm/mach-imx/pm-imx27.c1
-rw-r--r--arch/arm/mach-npcm/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c14
-rw-r--r--arch/arm/mach-omap1/board-h2.c4
-rw-r--r--arch/arm/mach-omap1/pm.c10
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c2
-rw-r--r--arch/arm/mach-pxa/pxa_cplds_irqs.c7
-rw-r--r--arch/arm/tools/syscall.tbl2
-rw-r--r--arch/arm/xen/mm.c20
-rw-r--r--arch/arm64/Kbuild3
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi23
-rw-r--r--arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77950.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi12
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi13
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-base-board.dts31
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-main.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi7
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi10
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi11
-rw-r--r--arch/arm64/include/asm/Kbuild2
-rw-r--r--arch/arm64/include/asm/atomic.h2
-rw-r--r--arch/arm64/include/asm/barrier.h2
-rw-r--r--arch/arm64/include/asm/cpucaps.h74
-rw-r--r--arch/arm64/include/asm/kvm_asm.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/unistd32.h3
-rw-r--r--arch/arm64/kvm/arm.c20
-rw-r--r--arch/arm64/kvm/hyp/exception.c18
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/adjust_pc.h18
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c3
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c3
-rw-r--r--arch/arm64/kvm/mmu.c12
-rw-r--r--arch/arm64/kvm/reset.c28
-rw-r--r--arch/arm64/kvm/sys_regs.c42
-rw-r--r--arch/arm64/mm/flush.c4
-rw-r--r--arch/arm64/mm/init.c3
-rw-r--r--arch/arm64/mm/mmu.c3
-rw-r--r--arch/arm64/mm/proc.S12
-rw-r--r--arch/arm64/tools/Makefile22
-rw-r--r--arch/arm64/tools/cpucaps65
-rwxr-xr-xarch/arm64/tools/gen-cpucaps.awk40
-rw-r--r--arch/csky/include/asm/cmpxchg.h8
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/atomic.h97
-rw-r--r--arch/h8300/include/asm/cmpxchg.h66
-rw-r--r--arch/hexagon/include/asm/atomic.h28
-rw-r--r--arch/hexagon/include/asm/cmpxchg.h4
-rw-r--r--arch/ia64/include/asm/atomic.h74
-rw-r--r--arch/ia64/include/asm/cmpxchg.h16
-rw-r--r--arch/ia64/include/uapi/asm/cmpxchg.h10
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/m68k/atari/config.c12
-rw-r--r--arch/m68k/configs/amiga_defconfig10
-rw-r--r--arch/m68k/configs/atari_defconfig8
-rw-r--r--arch/m68k/configs/mac_defconfig9
-rw-r--r--arch/m68k/configs/multi_defconfig16
-rw-r--r--arch/m68k/configs/q40_defconfig8
-rw-r--r--arch/m68k/include/asm/atomic.h60
-rw-r--r--arch/m68k/include/asm/cmpxchg.h10
-rw-r--r--arch/m68k/include/asm/mmu_context.h2
-rw-r--r--arch/m68k/kernel/signal.c3
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/m68k/mac/config.c24
-rw-r--r--arch/m68k/q40/config.c37
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/atomic.h28
-rw-r--r--arch/microblaze/include/asm/cmpxchg.h9
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/mips/alchemy/board-xxs1500.c1
-rw-r--r--arch/mips/include/asm/atomic.h55
-rw-r--r--arch/mips/include/asm/cmpxchg.h22
-rw-r--r--arch/mips/include/asm/mips-boards/launch.h5
-rw-r--r--arch/mips/kernel/cmpxchg.c4
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl2
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl2
-rw-r--r--arch/mips/lib/mips-atomic.c12
-rw-r--r--arch/mips/mm/cache.c30
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/openrisc/include/asm/atomic.h42
-rw-r--r--arch/openrisc/include/asm/barrier.h9
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h4
-rw-r--r--arch/openrisc/kernel/setup.c2
-rw-r--r--arch/openrisc/mm/init.c6
-rw-r--r--arch/parisc/include/asm/atomic.h34
-rw-r--r--arch/parisc/include/asm/cmpxchg.h14
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi16
-rw-r--r--arch/powerpc/include/asm/atomic.h140
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h30
-rw-r--r--arch/powerpc/include/asm/hvcall.h3
-rw-r--r--arch/powerpc/include/asm/interrupt.h9
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/paravirt.h22
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h6
-rw-r--r--arch/powerpc/include/asm/pte-walk.h29
-rw-r--r--arch/powerpc/include/asm/ptrace.h45
-rw-r--r--arch/powerpc/include/asm/qspinlock.h2
-rw-r--r--arch/powerpc/include/asm/syscall.h42
-rw-r--r--arch/powerpc/include/asm/uaccess.h2
-rw-r--r--arch/powerpc/kernel/eeh.c23
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S38
-rw-r--r--arch/powerpc/kernel/interrupt.c4
-rw-r--r--arch/powerpc/kernel/io-workarounds.c16
-rw-r--r--arch/powerpc/kernel/iommu.c11
-rw-r--r--arch/powerpc/kernel/kprobes.c4
-rw-r--r--arch/powerpc/kernel/legacy_serial.c7
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/signal.h4
-rw-r--r--arch/powerpc/kernel/signal_64.c9
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S7
-rw-r--r--arch/powerpc/lib/feature-fixups.c114
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S10
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c29
-rw-r--r--arch/riscv/Kconfig19
-rw-r--r--arch/riscv/Kconfig.socs1
-rw-r--r--arch/riscv/Makefile11
-rw-r--r--arch/riscv/boot/dts/microchip/Makefile1
-rw-r--r--arch/riscv/boot/dts/sifive/Makefile1
-rw-r--r--arch/riscv/boot/dts/sifive/fu740-c000.dtsi2
-rw-r--r--arch/riscv/errata/sifive/Makefile2
-rw-r--r--arch/riscv/include/asm/alternative-macros.h4
-rw-r--r--arch/riscv/include/asm/atomic.h128
-rw-r--r--arch/riscv/include/asm/cmpxchg.h34
-rw-r--r--arch/riscv/include/asm/kexec.h4
-rw-r--r--arch/riscv/include/asm/pgtable.h5
-rw-r--r--arch/riscv/kernel/machine_kexec.c11
-rw-r--r--arch/riscv/kernel/probes/kprobes.c2
-rw-r--r--arch/riscv/kernel/setup.c4
-rw-r--r--arch/riscv/kernel/stacktrace.c14
-rw-r--r--arch/riscv/kernel/traps.c13
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S15
-rw-r--r--arch/riscv/mm/init.c8
-rw-r--r--arch/riscv/mm/kasan_init.c10
-rw-r--r--arch/s390/include/asm/atomic.h2
-rw-r--r--arch/s390/include/asm/stacktrace.h18
-rw-r--r--arch/s390/kernel/entry.S5
-rw-r--r--arch/s390/kernel/signal.c1
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/kernel/topology.c12
-rw-r--r--arch/s390/kvm/pv.c7
-rw-r--r--arch/sh/include/asm/atomic-grb.h6
-rw-r--r--arch/sh/include/asm/atomic-irq.h6
-rw-r--r--arch/sh/include/asm/atomic-llsc.h6
-rw-r--r--arch/sh/include/asm/atomic.h8
-rw-r--r--arch/sh/include/asm/cmpxchg.h4
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/sh/kernel/traps.c1
-rw-r--r--arch/sparc/include/asm/atomic_32.h38
-rw-r--r--arch/sparc/include/asm/atomic_64.h36
-rw-r--r--arch/sparc/include/asm/cmpxchg_32.h12
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h12
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/sparc/lib/atomic32.c24
-rw-r--r--arch/sparc/lib/atomic_64.S42
-rw-r--r--arch/x86/Makefile13
-rw-r--r--arch/x86/boot/compressed/Makefile7
-rw-r--r--arch/x86/boot/compressed/misc.c2
-rw-r--r--arch/x86/boot/compressed/misc.h2
-rw-r--r--arch/x86/boot/compressed/sev.c (renamed from arch/x86/boot/compressed/sev-es.c)4
-rw-r--r--arch/x86/entry/common.c5
-rw-r--r--arch/x86/entry/entry_64.S4
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/events/core.c6
-rw-r--r--arch/x86/events/intel/core.c10
-rw-r--r--arch/x86/events/intel/lbr.c27
-rw-r--r--arch/x86/events/intel/uncore_snbep.c9
-rw-r--r--arch/x86/events/perf_event.h6
-rw-r--r--arch/x86/events/rapl.c6
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/atomic.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/disabled-features.h7
-rw-r--r--arch/x86/include/asm/fpu/api.h6
-rw-r--r--arch/x86/include/asm/fpu/internal.h50
-rw-r--r--arch/x86/include/asm/idtentry.h29
-rw-r--r--arch/x86/include/asm/intel-family.h3
-rw-r--r--arch/x86/include/asm/jump_label.h79
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/mce.h13
-rw-r--r--arch/x86/include/asm/msr-index.h10
-rw-r--r--arch/x86/include/asm/page_64.h2
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/sev-common.h76
-rw-r--r--arch/x86/include/asm/sev.h (renamed from arch/x86/include/asm/sev-es.h)30
-rw-r--r--arch/x86/include/asm/thermal.h4
-rw-r--r--arch/x86/include/asm/vdso/clocksource.h2
-rw-r--r--arch/x86/kernel/Makefile6
-rw-r--r--arch/x86/kernel/acpi/cstate.c3
-rw-r--r--arch/x86/kernel/alternative.c64
-rw-r--r--arch/x86/kernel/amd_nb.c3
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apic/vector.c20
-rw-r--r--arch/x86/kernel/cpu/amd.c24
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/hygon.c4
-rw-r--r--arch/x86/kernel/cpu/intel.c4
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c55
-rw-r--r--arch/x86/kernel/cpu/mce/apei.c3
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c4
-rw-r--r--arch/x86/kernel/cpu/sgx/virt.c1
-rw-r--r--arch/x86/kernel/cpu/tsx.c37
-rw-r--r--arch/x86/kernel/fpu/signal.c80
-rw-r--r--arch/x86/kernel/fpu/xstate.c98
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/jump_label.c81
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c2
-rw-r--r--arch/x86/kernel/nmi.c2
-rw-r--r--arch/x86/kernel/setup.c44
-rw-r--r--arch/x86/kernel/sev-shared.c (renamed from arch/x86/kernel/sev-es-shared.c)21
-rw-r--r--arch/x86/kernel/sev.c (renamed from arch/x86/kernel/sev-es.c)339
-rw-r--r--arch/x86/kernel/signal_compat.c9
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/umip.c10
-rw-r--r--arch/x86/kvm/cpuid.c1
-rw-r--r--arch/x86/kvm/emulate.c5
-rw-r--r--arch/x86/kvm/hyperv.c8
-rw-r--r--arch/x86/kvm/kvm_emulate.h3
-rw-r--r--arch/x86/kvm/lapic.c36
-rw-r--r--arch/x86/kvm/mmu/mmu.c26
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h14
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c6
-rw-r--r--arch/x86/kvm/svm/avic.c12
-rw-r--r--arch/x86/kvm/svm/sev.c26
-rw-r--r--arch/x86/kvm/svm/svm.c8
-rw-r--r--arch/x86/kvm/svm/svm.h40
-rw-r--r--arch/x86/kvm/trace.h6
-rw-r--r--arch/x86/kvm/vmx/capabilities.h3
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c14
-rw-r--r--arch/x86/kvm/vmx/posted_intr.h1
-rw-r--r--arch/x86/kvm/vmx/vmx.c7
-rw-r--r--arch/x86/kvm/x86.c54
-rw-r--r--arch/x86/lib/insn-eval.c30
-rw-r--r--arch/x86/lib/retpoline.S4
-rw-r--r--arch/x86/mm/extable.c2
-rw-r--r--arch/x86/mm/fault.c4
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c17
-rw-r--r--arch/x86/mm/numa.c8
-rw-r--r--arch/x86/pci/amd_bus.c2
-rw-r--r--arch/x86/pci/fixup.c44
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/platform/efi/quirks.c12
-rw-r--r--arch/x86/realmode/Makefile1
-rw-r--r--arch/x86/realmode/init.c16
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S4
-rw-r--r--arch/x86/xen/enlighten_pv.c10
-rw-r--r--arch/xtensa/include/asm/atomic.h26
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h14
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl2
-rw-r--r--block/bfq-iosched.c34
-rw-r--r--block/blk-iocost.c14
-rw-r--r--block/blk-mq-sched.c8
-rw-r--r--block/blk-mq.c11
-rw-r--r--block/genhd.c11
-rw-r--r--block/kyber-iosched.c5
-rw-r--r--block/mq-deadline.c3
-rw-r--r--block/partitions/efi.c2
-rw-r--r--crypto/async_tx/async_xor.c3
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/acpi_apd.c1
-rw-r--r--drivers/acpi/acpica/utdelete.c8
-rw-r--r--drivers/acpi/bus.c27
-rw-r--r--drivers/acpi/device_pm.c1
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/nfit/core.c15
-rw-r--r--drivers/acpi/power.c61
-rw-r--r--drivers/acpi/scan.c3
-rw-r--r--drivers/acpi/sleep.c6
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/Kconfig6
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/ahci.h7
-rw-r--r--drivers/ata/ahci_sunxi.c2
-rw-r--r--drivers/ata/pata_atiixp.c3
-rw-r--r--drivers/ata/pata_cs5520.c3
-rw-r--r--drivers/ata/pata_cs5530.c3
-rw-r--r--drivers/ata/pata_cypress.c10
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/ata/pata_falcon.c62
-rw-r--r--drivers/ata/pata_macio.c5
-rw-r--r--drivers/ata/pata_octeon_cf.c5
-rw-r--r--drivers/ata/pata_rb532_cf.c8
-rw-r--r--drivers/ata/pata_sc1200.c3
-rw-r--r--drivers/ata/pata_serverworks.c3
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/ata/sata_highbank.c6
-rw-r--r--drivers/ata/sata_mv.c6
-rw-r--r--drivers/ata/sata_nv.c12
-rw-r--r--drivers/ata/sata_sil24.c5
-rw-r--r--drivers/base/core.c71
-rw-r--r--drivers/base/memory.c6
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/regmap/Kconfig6
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/regmap-i2c.c45
-rw-r--r--drivers/base/regmap/regmap-irq.c7
-rw-r--r--drivers/base/regmap/regmap-mdio.c116
-rw-r--r--drivers/base/regmap/regmap.c15
-rw-r--r--drivers/base/swnode.c16
-rw-r--r--drivers/block/loop.c25
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/nbd.c10
-rw-r--r--drivers/bluetooth/btusb.c25
-rw-r--r--drivers/bus/mhi/pci_generic.c42
-rw-r--r--drivers/bus/ti-sysc.c60
-rw-r--r--drivers/cdrom/gdrom.c13
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/tpm/tpm2-cmd.c1
-rw-r--r--drivers/char/tpm/tpm_tis_core.c22
-rw-r--r--drivers/clk/clk.c9
-rw-r--r--drivers/clocksource/hyperv_timer.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm10
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c6
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c245
-rw-r--r--drivers/cpufreq/intel_pstate.c14
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c1
-rw-r--r--drivers/dma-buf/dma-buf.c10
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c3
-rw-r--r--drivers/dma/idxd/cdev.c1
-rw-r--r--drivers/dma/idxd/init.c67
-rw-r--r--drivers/dma/ipu/ipu_irq.c2
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c27
-rw-r--r--drivers/dma/pl330.c6
-rw-r--r--drivers/dma/qcom/Kconfig1
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c17
-rw-r--r--drivers/dma/sf-pdma/Kconfig1
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/dma/ste_dma40.c3
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c31
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c2
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/edac/mce_amd.c70
-rw-r--r--drivers/firmware/arm_scmi/notify.h2
-rw-r--r--drivers/firmware/arm_scpi.c4
-rw-r--r--drivers/firmware/efi/apple-properties.c2
-rw-r--r--drivers/firmware/efi/cper.c4
-rw-r--r--drivers/firmware/efi/dev-path-parser.c49
-rw-r--r--drivers/firmware/efi/fdtparams.c3
-rw-r--r--drivers/firmware/efi/libstub/file.c2
-rw-r--r--drivers/firmware/efi/memattr.c5
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-cadence.c1
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-tegra186.c11
-rw-r--r--drivers/gpio/gpio-wcd934x.c2
-rw-r--r--drivers/gpio/gpio-xilinx.c2
-rw-r--r--drivers/gpio/gpiolib-cdev.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c7
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/si_dpm.c174
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h34
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c10
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c17
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c1
-rw-r--r--drivers/gpu/drm/drm_auth.c3
-rw-r--r--drivers/gpu/drm/drm_ioctl.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c61
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c71
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c11
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c5
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c124
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c122
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h4
-rw-r--r--drivers/gpu/drm/i915/i915_active.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c117
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c1
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c164
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c26
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c23
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c17
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c1
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c144
-rw-r--r--drivers/gpu/drm/radeon/nislands_smc.h34
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c177
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h34
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h5
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c41
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/hub.c2
-rw-r--r--drivers/gpu/drm/tegra/sor.c70
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c6
-rw-r--r--drivers/gpu/host1x/bus.c30
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c19
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c3
-rw-r--r--drivers/hid/hid-a4tech.c2
-rw-r--r--drivers/hid/hid-asus.c32
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-debug.c3
-rw-r--r--drivers/hid/hid-ft260.c29
-rw-r--r--drivers/hid/hid-gt683r.c1
-rw-r--r--drivers/hid/hid-ids.h9
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-logitech-hidpp.c1
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-multitouch.c46
-rw-r--r--drivers/hid/hid-quirks.c4
-rw-r--r--drivers/hid/hid-semitek.c40
-rw-r--r--drivers/hid/hid-sensor-custom.c8
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hid/hid-thrustmaster.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c13
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c2
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hid/usbhid/hid-pidff.c1
-rw-r--r--drivers/hwmon/Kconfig11
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/bt1-pvt.c4
-rw-r--r--drivers/hwmon/corsair-psu.c18
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c4
-rw-r--r--drivers/hwmon/hwmon.c36
-rw-r--r--drivers/hwmon/ina3221.c22
-rw-r--r--drivers/hwmon/lm70.c28
-rw-r--r--drivers/hwmon/lm75.c13
-rw-r--r--drivers/hwmon/lm80.c11
-rw-r--r--drivers/hwmon/lm90.c92
-rw-r--r--drivers/hwmon/ltc2992.c8
-rw-r--r--drivers/hwmon/max31722.c9
-rw-r--r--drivers/hwmon/max31790.c72
-rw-r--r--drivers/hwmon/ntc_thermistor.c4
-rw-r--r--drivers/hwmon/occ/common.c5
-rw-r--r--drivers/hwmon/occ/common.h2
-rw-r--r--drivers/hwmon/pmbus/Kconfig34
-rw-r--r--drivers/hwmon/pmbus/Makefile3
-rw-r--r--drivers/hwmon/pmbus/adm1275.c14
-rw-r--r--drivers/hwmon/pmbus/bpa-rs600.c29
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c206
-rw-r--r--drivers/hwmon/pmbus/fsp-3y.c45
-rw-r--r--drivers/hwmon/pmbus/isl68137.c4
-rw-r--r--drivers/hwmon/pmbus/mp2888.c408
-rw-r--r--drivers/hwmon/pmbus/pim4328.c233
-rw-r--r--drivers/hwmon/pmbus/pmbus.c19
-rw-r--r--drivers/hwmon/pmbus/pmbus.h2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c151
-rw-r--r--drivers/hwmon/pmbus/q54sj108a2.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c94
-rw-r--r--drivers/hwmon/sch5627.c18
-rw-r--r--drivers/hwmon/sch5636.c9
-rw-r--r--drivers/hwmon/sch56xx-common.c65
-rw-r--r--drivers/hwmon/sch56xx-common.h4
-rw-r--r--drivers/hwmon/scpi-hwmon.c9
-rw-r--r--drivers/hwmon/sht4x.c296
-rw-r--r--drivers/hwmon/tps23861.c17
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-altera.c9
-rw-r--r--drivers/i2c/busses/i2c-cadence.c2
-rw-r--r--drivers/i2c/busses/i2c-cp2615.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c9
-rw-r--r--drivers/i2c/busses/i2c-icy.c1
-rw-r--r--drivers/i2c/busses/i2c-mpc.c81
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c5
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c8
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c23
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c4
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c4
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c4
-rw-r--r--drivers/ide/Kconfig849
-rw-r--r--drivers/ide/Makefile111
-rw-r--r--drivers/ide/aec62xx.c331
-rw-r--r--drivers/ide/ali14xx.c250
-rw-r--r--drivers/ide/alim15x3.c602
-rw-r--r--drivers/ide/amd74xx.c343
-rw-r--r--drivers/ide/atiixp.c212
-rw-r--r--drivers/ide/buddha.c238
-rw-r--r--drivers/ide/cmd640.c848
-rw-r--r--drivers/ide/cmd64x.c452
-rw-r--r--drivers/ide/cs5520.c168
-rw-r--r--drivers/ide/cs5530.c295
-rw-r--r--drivers/ide/cs5535.c216
-rw-r--r--drivers/ide/cs5536.c294
-rw-r--r--drivers/ide/cy82c693.c234
-rw-r--r--drivers/ide/delkin_cb.c181
-rw-r--r--drivers/ide/dtc2278.c155
-rw-r--r--drivers/ide/falconide.c197
-rw-r--r--drivers/ide/gayle.c188
-rw-r--r--drivers/ide/hpt366.c1545
-rw-r--r--drivers/ide/ht6560b.c383
-rw-r--r--drivers/ide/icside.c692
-rw-r--r--drivers/ide/ide-4drives.c65
-rw-r--r--drivers/ide/ide-acpi.c622
-rw-r--r--drivers/ide/ide-atapi.c756
-rw-r--r--drivers/ide/ide-cd.c1858
-rw-r--r--drivers/ide/ide-cd.h123
-rw-r--r--drivers/ide/ide-cd_ioctl.c468
-rw-r--r--drivers/ide/ide-cd_verbose.c362
-rw-r--r--drivers/ide/ide-cs.c364
-rw-r--r--drivers/ide/ide-devsets.c192
-rw-r--r--drivers/ide/ide-disk.c795
-rw-r--r--drivers/ide/ide-disk.h30
-rw-r--r--drivers/ide/ide-disk_ioctl.c33
-rw-r--r--drivers/ide/ide-disk_proc.c125
-rw-r--r--drivers/ide/ide-dma-sff.c336
-rw-r--r--drivers/ide/ide-dma.c551
-rw-r--r--drivers/ide/ide-eh.c443
-rw-r--r--drivers/ide/ide-floppy.c551
-rw-r--r--drivers/ide/ide-floppy.h42
-rw-r--r--drivers/ide/ide-floppy_ioctl.c339
-rw-r--r--drivers/ide/ide-floppy_proc.c34
-rw-r--r--drivers/ide/ide-gd.c432
-rw-r--r--drivers/ide/ide-gd.h43
-rw-r--r--drivers/ide/ide-generic.c139
-rw-r--r--drivers/ide/ide-io-std.c262
-rw-r--r--drivers/ide/ide-io.c904
-rw-r--r--drivers/ide/ide-ioctls.c306
-rw-r--r--drivers/ide/ide-iops.c536
-rw-r--r--drivers/ide/ide-legacy.c59
-rw-r--r--drivers/ide/ide-lib.c146
-rw-r--r--drivers/ide/ide-park.c155
-rw-r--r--drivers/ide/ide-pci-generic.c203
-rw-r--r--drivers/ide/ide-pio-blacklist.c96
-rw-r--r--drivers/ide/ide-pm.c261
-rw-r--r--drivers/ide/ide-pnp.c92
-rw-r--r--drivers/ide/ide-probe.c1623
-rw-r--r--drivers/ide/ide-proc.c633
-rw-r--r--drivers/ide/ide-scan-pci.c113
-rw-r--r--drivers/ide/ide-sysfs.c143
-rw-r--r--drivers/ide/ide-tape.c2083
-rw-r--r--drivers/ide/ide-taskfile.c668
-rw-r--r--drivers/ide/ide-timings.c198
-rw-r--r--drivers/ide/ide-xfer-mode.c267
-rw-r--r--drivers/ide/ide.c415
-rw-r--r--drivers/ide/ide_platform.c133
-rw-r--r--drivers/ide/it8172.c165
-rw-r--r--drivers/ide/it8213.c217
-rw-r--r--drivers/ide/it821x.c715
-rw-r--r--drivers/ide/jmicron.c176
-rw-r--r--drivers/ide/macide.c161
-rw-r--r--drivers/ide/ns87415.c350
-rw-r--r--drivers/ide/opti621.c179
-rw-r--r--drivers/ide/palm_bk3710.c387
-rw-r--r--drivers/ide/pdc202xx_new.c557
-rw-r--r--drivers/ide/pdc202xx_old.c362
-rw-r--r--drivers/ide/piix.c476
-rw-r--r--drivers/ide/pmac.c1703
-rw-r--r--drivers/ide/q40ide.c168
-rw-r--r--drivers/ide/qd65xx.c446
-rw-r--r--drivers/ide/qd65xx.h145
-rw-r--r--drivers/ide/rapide.c106
-rw-r--r--drivers/ide/rz1000.c100
-rw-r--r--drivers/ide/sc1200.c355
-rw-r--r--drivers/ide/serverworks.c456
-rw-r--r--drivers/ide/setup-pci.c682
-rw-r--r--drivers/ide/siimage.c843
-rw-r--r--drivers/ide/sis5513.c637
-rw-r--r--drivers/ide/sl82c105.c367
-rw-r--r--drivers/ide/slc90e66.c182
-rw-r--r--drivers/ide/tc86c001.c270
-rw-r--r--drivers/ide/triflex.c143
-rw-r--r--drivers/ide/trm290.c374
-rw-r--r--drivers/ide/tx4938ide.c209
-rw-r--r--drivers/ide/tx4939ide.c628
-rw-r--r--drivers/ide/umc8672.c184
-rw-r--r--drivers/ide/via82cxxx.c532
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/adc/ad7124.c36
-rw-r--r--drivers/iio/adc/ad7192.c19
-rw-r--r--drivers/iio/adc/ad7768-1.c8
-rw-r--r--drivers/iio/adc/ad7793.c1
-rw-r--r--drivers/iio/adc/ad7923.c4
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig1
-rw-r--r--drivers/iio/dac/ad5770r.c16
-rw-r--r--drivers/iio/gyro/Kconfig1
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c13
-rw-r--r--drivers/iio/humidity/Kconfig1
-rw-r--r--drivers/iio/industrialio-core.c9
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/gp2ap002.c5
-rw-r--r--drivers/iio/light/tsl2583.c8
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/orientation/Kconfig2
-rw-r--r--drivers/iio/pressure/Kconfig1
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c1
-rw-r--r--drivers/iio/temperature/Kconfig1
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c5
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c7
-rw-r--r--drivers/infiniband/hw/mlx4/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c6
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c3
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c7
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c11
-rw-r--r--drivers/infiniband/hw/mlx5/main.c1
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c1
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c4
-rw-r--r--drivers/iommu/amd/iommu.c4
-rw-r--r--drivers/iommu/intel/dmar.c4
-rw-r--r--drivers/iommu/intel/iommu.c9
-rw-r--r--drivers/iommu/intel/pasid.c3
-rw-r--r--drivers/iommu/virtio-iommu.c1
-rw-r--r--drivers/irqchip/Kconfig2
-rw-r--r--drivers/irqchip/irq-gic-v3.c36
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c4
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c17
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c21
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c1
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/request.c20
-rw-r--r--drivers/md/bcache/stats.c14
-rw-r--r--drivers/md/bcache/stats.h1
-rw-r--r--drivers/md/bcache/sysfs.c4
-rw-r--r--drivers/md/dm-integrity.c81
-rw-r--r--drivers/md/dm-snap.c3
-rw-r--r--drivers/md/dm-verity-verify-sig.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/dvb-frontends/sp8870.c2
-rw-r--r--drivers/media/platform/rcar_drif.c1
-rw-r--r--drivers/media/usb/gspca/cpia1.c6
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_mt9m111.c16
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_po1030.c14
-rw-r--r--drivers/memstick/core/ms_block.c37
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c10
-rw-r--r--drivers/mfd/mt6358-irq.c89
-rw-r--r--drivers/mfd/mt6397-core.c24
-rw-r--r--drivers/misc/cardreader/rtl8411.c1
-rw-r--r--drivers/misc/cardreader/rts5209.c1
-rw-r--r--drivers/misc/cardreader/rts5227.c2
-rw-r--r--drivers/misc/cardreader/rts5228.c1
-rw-r--r--drivers/misc/cardreader/rts5229.c1
-rw-r--r--drivers/misc/cardreader/rts5249.c3
-rw-r--r--drivers/misc/cardreader/rts5260.c1
-rw-r--r--drivers/misc/cardreader/rts5261.c1
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c44
-rw-r--r--drivers/misc/eeprom/at24.c6
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c2
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c53
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h23
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c7
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c4
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c59
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c12
-rw-r--r--drivers/misc/habanalabs/goya/goya.c47
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c40
-rw-r--r--drivers/misc/ics932s401.c2
-rw-r--r--drivers/misc/kgdbts.c3
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h1
-rw-r--r--drivers/misc/mei/interrupt.c3
-rw-r--r--drivers/mmc/core/block.c11
-rw-r--r--drivers/mmc/core/core.c22
-rw-r--r--drivers/mmc/core/core.h9
-rw-r--r--drivers/mmc/core/debugfs.c1
-rw-r--r--drivers/mmc/core/host.c3
-rw-r--r--drivers/mmc/core/mmc.c68
-rw-r--r--drivers/mmc/core/mmc_ops.c163
-rw-r--r--drivers/mmc/core/mmc_ops.h12
-rw-r--r--drivers/mmc/core/sd.c481
-rw-r--r--drivers/mmc/core/sd_ops.c38
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/cqhci-core.c21
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c1
-rw-r--r--drivers/mmc/host/jz4740_mmc.c6
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c57
-rw-r--r--drivers/mmc/host/mmc_spi.c12
-rw-r--r--drivers/mmc/host/mtk-sd.c25
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c9
-rw-r--r--drivers/mmc/host/s3cmci.c7
-rw-r--r--drivers/mmc/host/sdhci-acpi.c11
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c18
-rw-r--r--drivers/mmc/host/sdhci-iproc.c30
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c50
-rw-r--r--drivers/mmc/host/sdhci-omap.c5
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c11
-rw-r--r--drivers/mmc/host/sdhci-sprd.c1
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c1
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c12
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c12
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c15
-rw-r--r--drivers/mtd/nand/raw/ndfc.c12
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c12
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c8
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c5
-rw-r--r--drivers/mtd/nand/spi/core.c45
-rw-r--r--drivers/mtd/parsers/ofpart_core.c26
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/can/usb/mcba_usb.c17
-rw-r--r--drivers/net/dsa/bcm_sf2.c5
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c15
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c23
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c74
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c54
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c146
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h10
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c27
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c48
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c46
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c80
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c6
-rw-r--r--drivers/net/ethernet/ec_bhf.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c24
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c8
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c21
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c11
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c55
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c23
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ethernet/korina.c12
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c17
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c77
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c5
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c18
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c32
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c27
-rw-r--r--drivers/net/hamradio/mkiss.c1
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ipa/ipa.h2
-rw-r--r--drivers/net/ipa/ipa_mem.c3
-rw-r--r--drivers/net/mdio/mdio-octeon.c2
-rw-r--r--drivers/net/mdio/mdio-thunder.c1
-rw-r--r--drivers/net/mhi/net.c2
-rw-r--r--drivers/net/phy/dp83867.c6
-rw-r--r--drivers/net/phy/mdio_bus.c3
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/hso.c45
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c44
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/virtio_net.c20
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/wireguard/Makefile3
-rw-r--r--drivers/net/wireguard/allowedips.c189
-rw-r--r--drivers/net/wireguard/allowedips.h14
-rw-r--r--drivers/net/wireguard/main.c17
-rw-r--r--drivers/net/wireguard/peer.c27
-rw-r--r--drivers/net/wireguard/peer.h3
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c165
-rw-r--r--drivers/net/wireguard/socket.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c201
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c34
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c42
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c18
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.h2
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/nfcmrvl.h2
-rw-r--r--drivers/nfc/nfcmrvl/spi.c2
-rw-r--r--drivers/nfc/nfcmrvl/uart.c2
-rw-r--r--drivers/nfc/nfcmrvl/usb.c2
-rw-r--r--drivers/nvme/host/Kconfig3
-rw-r--r--drivers/nvme/host/core.c7
-rw-r--r--drivers/nvme/host/fabrics.c5
-rw-r--r--drivers/nvme/host/fc.c37
-rw-r--r--drivers/nvme/host/multipath.c55
-rw-r--r--drivers/nvme/host/nvme.h8
-rw-r--r--drivers/nvme/host/rdma.c5
-rw-r--r--drivers/nvme/host/tcp.c5
-rw-r--r--drivers/nvme/target/admin-cmd.c7
-rw-r--r--drivers/nvme/target/core.c50
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c6
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/io-cmd-file.c8
-rw-r--r--drivers/nvme/target/loop.c15
-rw-r--r--drivers/nvme/target/nvmet.h8
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/rdma.c4
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/pci/controller/dwc/Makefile3
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194-acpi.c108
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c138
-rw-r--r--drivers/pci/controller/pci-aardvark.c49
-rw-r--r--drivers/pci/of.c9
-rw-r--r--drivers/pci/pci.c16
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/pci/quirks.c93
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h4
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c4
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c4
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c4
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c3
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.c3
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c18
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c9
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c11
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c4
-rw-r--r--drivers/platform/surface/aggregator/controller.c5
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c7
-rw-r--r--drivers/platform/surface/surface_dtx.c9
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c3
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c38
-rw-r--r--drivers/platform/x86/hp-wireless.c2
-rw-r--r--drivers/platform/x86/hp_accel.c22
-rw-r--r--drivers/platform/x86/ideapad-laptop.c13
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c80
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c43
-rw-r--r--drivers/ptp/ptp_clock.c6
-rw-r--r--drivers/ptp/ptp_ocp.c4
-rw-r--r--drivers/rapidio/rio_cm.c17
-rw-r--r--drivers/regulator/Kconfig57
-rw-r--r--drivers/regulator/Makefile8
-rw-r--r--drivers/regulator/atc260x-regulator.c19
-rw-r--r--drivers/regulator/bd70528-regulator.c283
-rw-r--r--drivers/regulator/bd71815-regulator.c60
-rw-r--r--drivers/regulator/bd718x7-regulator.c2
-rw-r--r--drivers/regulator/bd9576-regulator.c1084
-rw-r--r--drivers/regulator/core.c322
-rw-r--r--drivers/regulator/cros-ec-regulator.c3
-rw-r--r--drivers/regulator/da9052-regulator.c3
-rw-r--r--drivers/regulator/da9121-regulator.c10
-rw-r--r--drivers/regulator/devres.c52
-rw-r--r--drivers/regulator/fan53555.c119
-rw-r--r--drivers/regulator/fan53880.c10
-rw-r--r--drivers/regulator/fixed.c10
-rw-r--r--drivers/regulator/helpers.c2
-rw-r--r--drivers/regulator/hi6421-regulator.c8
-rw-r--r--drivers/regulator/hi6421v600-regulator.c63
-rw-r--r--drivers/regulator/hi655x-regulator.c18
-rw-r--r--drivers/regulator/internal.h11
-rw-r--r--drivers/regulator/irq_helpers.c397
-rw-r--r--drivers/regulator/lp8755.c55
-rw-r--r--drivers/regulator/ltc3589.c73
-rw-r--r--drivers/regulator/max77620-regulator.c17
-rw-r--r--drivers/regulator/max77686-regulator.c42
-rw-r--r--drivers/regulator/max77802-regulator.c70
-rw-r--r--drivers/regulator/max8893.c183
-rw-r--r--drivers/regulator/max8973-regulator.c37
-rw-r--r--drivers/regulator/mcp16502.c79
-rw-r--r--drivers/regulator/mp5416.c44
-rw-r--r--drivers/regulator/mp886x.c32
-rw-r--r--drivers/regulator/mt6315-regulator.c23
-rw-r--r--drivers/regulator/mt6358-regulator.c24
-rw-r--r--drivers/regulator/mt6359-regulator.c997
-rw-r--r--drivers/regulator/of_regulator.c58
-rw-r--r--drivers/regulator/pca9450-regulator.c51
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c10
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c62
-rw-r--r--drivers/regulator/qcom_smd-regulator.c85
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c6
-rw-r--r--drivers/regulator/qcom_usb_vbus-regulator.c12
-rw-r--r--drivers/regulator/rk808-regulator.c116
-rw-r--r--drivers/regulator/rt4801-regulator.c4
-rw-r--r--drivers/regulator/rt4831-regulator.c3
-rw-r--r--drivers/regulator/rt6160-regulator.c319
-rw-r--r--drivers/regulator/rt6245-regulator.c254
-rw-r--r--drivers/regulator/rtmv20-regulator.c44
-rw-r--r--drivers/regulator/scmi-regulator.c2
-rw-r--r--drivers/regulator/stpmic1_regulator.c20
-rw-r--r--drivers/regulator/sy7636a-regulator.c128
-rw-r--r--drivers/regulator/uniphier-regulator.c1
-rw-r--r--drivers/regulator/userspace-consumer.c14
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/s390/block/dasd_diag.c8
-rw-r--r--drivers/s390/block/dasd_fba.c8
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c12
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c2
-rw-r--r--drivers/s390/crypto/ap_queue.c11
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c10
-rw-r--r--drivers/scsi/BusLogic.c6
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h2
-rw-r--r--drivers/scsi/aic7xxx/scsi_message.h11
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c8
-rw-r--r--drivers/scsi/hosts.c47
-rw-r--r--drivers/scsi/libsas/sas_port.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c10
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c7
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c15
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c19
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/scsi/vmw_pvscsi.c8
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c4
-rw-r--r--drivers/soundwire/qcom.c12
-rw-r--r--drivers/spi/Kconfig3
-rw-r--r--drivers/spi/spi-altera-dfl.c4
-rw-r--r--drivers/spi/spi-ath79.c9
-rw-r--r--drivers/spi/spi-atmel.c139
-rw-r--r--drivers/spi/spi-bcm2835.c204
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-bitbang.c18
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c4
-rw-r--r--drivers/spi/spi-fsl-spi.c4
-rw-r--r--drivers/spi/spi-geni-qcom.c4
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c51
-rw-r--r--drivers/spi/spi-lm70llp.c2
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-mem.c88
-rw-r--r--drivers/spi/spi-meson-spicc.c8
-rw-r--r--drivers/spi/spi-mpc512x-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c2
-rw-r--r--drivers/spi/spi-npcm-pspi.c2
-rw-r--r--drivers/spi/spi-nxp-fspi.c11
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-omap-100k.c6
-rw-r--r--drivers/spi/spi-omap-uwire.c13
-rw-r--r--drivers/spi/spi-omap2-mcspi.c37
-rw-r--r--drivers/spi/spi-pl022.c4
-rw-r--r--drivers/spi/spi-ppc4xx.c10
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c41
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c11
-rw-r--r--drivers/spi/spi-pxa2xx.c392
-rw-r--r--drivers/spi/spi-pxa2xx.h68
-rw-r--r--drivers/spi/spi-rockchip.c55
-rw-r--r--drivers/spi/spi-rspi.c6
-rw-r--r--drivers/spi/spi-sc18is602.c9
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/spi/spi-sprd.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c93
-rw-r--r--drivers/spi/spi-sun6i.c6
-rw-r--r--drivers/spi/spi-tegra114.c3
-rw-r--r--drivers/spi/spi-tegra20-slink.c5
-rw-r--r--drivers/spi/spi-tegra210-quad.c2
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-uniphier.c2
-rw-r--r--drivers/spi/spi-zynq-qspi.c16
-rw-r--r--drivers/spi/spi.c312
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c4
-rw-r--r--drivers/staging/iio/cdc/ad7746.c1
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c29
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c21
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/target_core_transport.c6
-rw-r--r--drivers/target/target_core_user.c12
-rw-r--r--drivers/tee/amdtee/amdtee_private.h13
-rw-r--r--drivers/tee/amdtee/call.c94
-rw-r--r--drivers/tee/amdtee/core.c15
-rw-r--r--drivers/tee/optee/call.c6
-rw-r--r--drivers/tee/optee/optee_msg.h6
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c4
-rw-r--r--drivers/thermal/intel/therm_throt.c15
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c2
-rw-r--r--drivers/thermal/thermal_core.c63
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thunderbolt/dma_port.c11
-rw-r--r--drivers/thunderbolt/usb4.c9
-rw-r--r--drivers/tty/serial/8250/8250.h32
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c1
-rw-r--r--drivers/tty/serial/8250/8250_dw.c1
-rw-r--r--drivers/tty/serial/8250/8250_exar.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci.c47
-rw-r--r--drivers/tty/serial/8250/8250_port.c12
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c3
-rw-r--r--drivers/tty/serial/rp2.c52
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/sh-sci.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/tty/vt/vt_ioctl.c57
-rw-r--r--drivers/uio/uio_hv_generic.c12
-rw-r--r--drivers/uio/uio_pci_generic.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c14
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c7
-rw-r--r--drivers/usb/chipidea/udc.c1
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c16
-rw-r--r--drivers/usb/class/cdc-wdm.c30
-rw-r--r--drivers/usb/core/devio.c11
-rw-r--r--drivers/usb/core/hub.c13
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/gadget.c3
-rw-r--r--drivers/usb/dwc2/platform.c4
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/core.h7
-rw-r--r--drivers/usb/dwc3/debug.h11
-rw-r--r--drivers/usb/dwc3/debugfs.c21
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c3
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c13
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c5
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c1
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/dwc3/gadget.c44
-rw-r--r--drivers/usb/gadget/config.c8
-rw-r--r--drivers/usb/gadget/function/f_ecm.c2
-rw-r--r--drivers/usb/gadget/function/f_eem.c6
-rw-r--r--drivers/usb/gadget/function/f_fs.c3
-rw-r--r--drivers/usb/gadget/function/f_hid.c3
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_ncm.c10
-rw-r--r--drivers/usb/gadget/function/f_printer.c3
-rw-r--r--drivers/usb/gadget/function/f_rndis.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c3
-rw-r--r--drivers/usb/gadget/function/f_subset.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c3
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c4
-rw-r--r--drivers/usb/host/xhci-ext-caps.h5
-rw-r--r--drivers/usb/host/xhci-pci.c15
-rw-r--r--drivers/usb/host/xhci-ring.c30
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/misc/brcmstb-usb-pinmap.c2
-rw-r--r--drivers/usb/misc/trancevibrator.c4
-rw-r--r--drivers/usb/misc/uss720.c1
-rw-r--r--drivers/usb/musb/mediatek.c2
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/serial/cp210x.c84
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/omninet.c8
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/typec/mux.c9
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c15
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c262
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c49
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h6
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c19
-rw-r--r--drivers/vfio/pci/Kconfig1
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/video/console/vgacon.c56
-rw-r--r--drivers/video/fbdev/core/fb_defio.c35
-rw-r--r--drivers/video/fbdev/core/fbcon.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c4
-rw-r--r--drivers/video/fbdev/hgafb.c21
-rw-r--r--drivers/video/fbdev/imsttfb.c26
-rw-r--r--drivers/xen/events/events_base.c11
-rw-r--r--drivers/xen/gntdev.c4
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--drivers/xen/unpopulated-alloc.c4
-rw-r--r--drivers/xen/xen-pciback/vpci.c14
-rw-r--r--drivers/xen/xen-pciback/xenbus.c22
-rw-r--r--fs/afs/cmservice.c5
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/main.c4
-rw-r--r--fs/afs/vlclient.c1
-rw-r--r--fs/afs/write.c26
-rw-r--r--fs/block_dev.c18
-rw-r--r--fs/btrfs/block-group.c8
-rw-r--r--fs/btrfs/compression.c49
-rw-r--r--fs/btrfs/disk-io.c26
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/btrfs/extent_io.c9
-rw-r--r--fs/btrfs/file-item.c108
-rw-r--r--fs/btrfs/file.c4
-rw-r--r--fs/btrfs/inode.c22
-rw-r--r--fs/btrfs/reflink.c33
-rw-r--r--fs/btrfs/tree-log.c57
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/zoned.c27
-rw-r--r--fs/btrfs/zoned.h5
-rw-r--r--fs/ceph/dir.c22
-rw-r--r--fs/ceph/file.c17
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/cifs/cifs_ioctl.h25
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/cifspdu.h3
-rw-r--r--fs/cifs/file.c46
-rw-r--r--fs/cifs/fs_context.c2
-rw-r--r--fs/cifs/ioctl.c143
-rw-r--r--fs/cifs/misc.c23
-rw-r--r--fs/cifs/smb2ops.c4
-rw-r--r--fs/cifs/smb2pdu.c13
-rw-r--r--fs/cifs/trace.h29
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/dax.c35
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/debugfs/inode.c9
-rw-r--r--fs/ecryptfs/crypto.c4
-rw-r--r--fs/erofs/zmap.c21
-rw-r--r--fs/ext4/extents.c43
-rw-r--r--fs/ext4/fast_commit.c170
-rw-r--r--fs/ext4/fast_commit.h19
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/namei.c6
-rw-r--r--fs/ext4/super.c11
-rw-r--r--fs/ext4/sysfs.c4
-rw-r--r--fs/f2fs/compress.c55
-rw-r--r--fs/f2fs/data.c39
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/file.c3
-rw-r--r--fs/f2fs/segment.c4
-rw-r--r--fs/gfs2/file.c5
-rw-r--r--fs/gfs2/glock.c28
-rw-r--r--fs/gfs2/glops.c2
-rw-r--r--fs/gfs2/log.c6
-rw-r--r--fs/gfs2/log.h1
-rw-r--r--fs/gfs2/lops.c7
-rw-r--r--fs/gfs2/lops.h1
-rw-r--r--fs/gfs2/util.c1
-rw-r--r--fs/hfsplus/extents.c7
-rw-r--r--fs/hugetlbfs/inode.c8
-rw-r--r--fs/io-wq.c29
-rw-r--r--fs/io-wq.h2
-rw-r--r--fs/io_uring.c83
-rw-r--r--fs/iomap/buffered-io.c4
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/netfs/Kconfig2
-rw-r--r--fs/netfs/read_helper.c51
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/filelayout/filelayout.c2
-rw-r--r--fs/nfs/namespace.c2
-rw-r--r--fs/nfs/nfs4_fs.h1
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/nfs4proc.c41
-rw-r--r--fs/nfs/nfstrace.h4
-rw-r--r--fs/nfs/pagelist.c20
-rw-r--r--fs/nfs/pnfs.c17
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nilfs2/sysfs.c1
-rw-r--r--fs/notify/fanotify/fanotify_user.c34
-rw-r--r--fs/notify/fdinfo.c2
-rw-r--r--fs/ocfs2/file.c55
-rw-r--r--fs/proc/base.c13
-rw-r--r--fs/quota/dquot.c6
-rw-r--r--fs/signalfd.c23
-rw-r--r--fs/squashfs/file.c6
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c18
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c12
-rw-r--r--fs/xfs/libxfs/xfs_fs.h4
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c46
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c17
-rw-r--r--fs/xfs/scrub/common.c4
-rw-r--r--fs/xfs/xfs_bmap_util.c98
-rw-r--r--fs/xfs/xfs_inode.c29
-rw-r--r--fs/xfs/xfs_ioctl.c101
-rw-r--r--fs/xfs/xfs_message.h2
-rw-r--r--include/asm-generic/atomic-instrumented.h498
-rw-r--r--include/asm-generic/atomic.h118
-rw-r--r--include/asm-generic/atomic64.h45
-rw-r--r--include/asm-generic/cmpxchg-local.h4
-rw-r--r--include/asm-generic/cmpxchg.h42
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/dt-bindings/usb/pd.h89
-rw-r--r--include/linux/arch_topology.h1
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/atomic-fallback.h2595
-rw-r--r--include/linux/atomic.h4
-rw-r--r--include/linux/avf/virtchnl.h1
-rw-r--r--include/linux/bits.h2
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/ceph/auth.h4
-rw-r--r--include/linux/cgroup-defs.h6
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/compat.h10
-rw-r--r--include/linux/compiler.h22
-rw-r--r--include/linux/compiler_attributes.h1
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/const.h8
-rw-r--r--include/linux/debug_locks.h2
-rw-r--r--include/linux/device.h6
-rw-r--r--include/linux/dynamic_debug.h5
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/entry-kvm.h3
-rw-r--r--include/linux/fanotify.h4
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/fwnode.h1
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/host1x.h30
-rw-r--r--include/linux/huge_mm.h8
-rw-r--r--include/linux/hugetlb.h24
-rw-r--r--include/linux/ide.h1623
-rw-r--r--include/linux/init.h3
-rw-r--r--include/linux/instrumentation.h20
-rw-r--r--include/linux/jump_label.h16
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/libata.h13
-rw-r--r--include/linux/libnvdimm.h1
-rw-r--r--include/linux/lockdep_types.h2
-rw-r--r--include/linux/mfd/mt6358/core.h8
-rw-r--r--include/linux/mfd/mt6359/core.h133
-rw-r--r--include/linux/mfd/mt6359/registers.h529
-rw-r--r--include/linux/mfd/mt6359p/registers.h249
-rw-r--r--include/linux/mfd/mt6397/core.h1
-rw-r--r--include/linux/mfd/mt6397/rtc.h1
-rw-r--r--include/linux/mfd/rohm-bd70528.h4
-rw-r--r--include/linux/mfd/rohm-bd71828.h10
-rw-r--r--include/linux/minmax.h10
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mlx5/driver.h48
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2
-rw-r--r--include/linux/mlx5/mpfs.h18
-rw-r--r--include/linux/mlx5/transobj.h1
-rw-r--r--include/linux/mm.h35
-rw-r--r--include/linux/mm_types.h31
-rw-r--r--include/linux/mmc/card.h23
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sd.h4
-rw-r--r--include/linux/mtd/spinand.h22
-rw-r--r--include/linux/pagemap.h19
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/pgtable.h8
-rw-r--r--include/linux/phy.h5
-rw-r--r--include/linux/platform_data/spi-ath79.h16
-rw-r--r--include/linux/platform_data/ti-sysc.h1
-rw-r--r--include/linux/pm.h1
-rw-r--r--include/linux/pmbus.h30
-rw-r--r--include/linux/ptp_clock_kernel.h2
-rw-r--r--include/linux/pxa2xx_ssp.h51
-rw-r--r--include/linux/randomize_kstack.h2
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/regmap.h40
-rw-r--r--include/linux/regulator/consumer.h14
-rw-r--r--include/linux/regulator/coupler.h5
-rw-r--r--include/linux/regulator/driver.h187
-rw-r--r--include/linux/regulator/machine.h26
-rw-r--r--include/linux/regulator/mt6359-regulator.h59
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/rtsx_pci.h2
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/sched/signal.h1
-rw-r--r--include/linux/seqlock.h6
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/spi/pxa2xx_spi.h21
-rw-r--r--include/linux/spi/spi-mem.h16
-rw-r--r--include/linux/spi/spi.h12
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/surface_aggregator/device.h6
-rw-r--r--include/linux/swapops.h15
-rw-r--r--include/linux/tick.h7
-rw-r--r--include/linux/usb/pd.h2
-rw-r--r--include/linux/usb/pd_ext_sdb.h4
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/net/caif/caif_dev.h2
-rw-r--r--include/net/caif/cfcnfg.h2
-rw-r--r--include/net/caif/cfserl.h1
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/net/mac80211.h9
-rw-r--r--include/net/net_namespace.h14
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/nfc/nci_core.h1
-rw-r--r--include/net/page_pool.h12
-rw-r--r--include/net/pkt_cls.h11
-rw-r--r--include/net/pkt_sched.h7
-rw-r--r--include/net/sch_generic.h35
-rw-r--r--include/net/sock.h21
-rw-r--r--include/net/tls.h10
-rw-r--r--include/sound/soc-dai.h2
-rw-r--r--include/trace/events/spi.h57
-rw-r--r--include/uapi/asm-generic/siginfo.h15
-rw-r--r--include/uapi/asm-generic/unistd.h3
-rw-r--r--include/uapi/linux/fs.h2
-rw-r--r--include/uapi/linux/futex.h2
-rw-r--r--include/uapi/linux/in.h3
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/io_uring.h19
-rw-r--r--include/uapi/linux/kvm.h5
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/signalfd.h4
-rw-r--r--include/uapi/linux/userfaultfd.h4
-rw-r--r--include/uapi/linux/virtio_ids.h2
-rw-r--r--include/uapi/misc/habanalabs.h33
-rw-r--r--include/xen/arm/swiotlb-xen.h15
-rw-r--r--init/Kconfig41
-rw-r--r--init/main.c2
-rw-r--r--ipc/mqueue.c6
-rw-r--r--ipc/msg.c6
-rw-r--r--ipc/sem.c6
-rw-r--r--kernel/bpf/Kconfig89
-rw-r--r--kernel/bpf/bpf_lsm.c2
-rw-r--r--kernel/bpf/btf.c12
-rw-r--r--kernel/bpf/helpers.c42
-rw-r--r--kernel/bpf/ringbuf.c24
-rw-r--r--kernel/bpf/syscall.c3
-rw-r--r--kernel/bpf/verifier.c162
-rw-r--r--kernel/cgroup/cgroup-v1.c6
-rw-r--r--kernel/cgroup/cgroup.c21
-rw-r--r--kernel/cgroup/cpuset.c2
-rw-r--r--kernel/cgroup/rdma.c2
-rw-r--r--kernel/cgroup/rstat.c2
-rw-r--r--kernel/crash_core.c1
-rw-r--r--kernel/dma/swiotlb.c23
-rw-r--r--kernel/entry/common.c5
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/futex.c32
-rw-r--r--kernel/irq_work.c3
-rw-r--r--kernel/jump_label.c12
-rw-r--r--kernel/kcsan/debugfs.c3
-rw-r--r--kernel/kthread.c77
-rw-r--r--kernel/locking/lockdep.c135
-rw-r--r--kernel/locking/mutex-debug.c4
-rw-r--r--kernel/locking/mutex-debug.h2
-rw-r--r--kernel/locking/mutex.c18
-rw-r--r--kernel/locking/mutex.h4
-rw-r--r--kernel/module.c31
-rw-r--r--kernel/printk/printk_safe.c2
-rw-r--r--kernel/ptrace.c18
-rw-r--r--kernel/reboot.c79
-rw-r--r--kernel/resource.c2
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/sched/debug.c3
-rw-r--r--kernel/sched/fair.c102
-rw-r--r--kernel/sched/pelt.h11
-rw-r--r--kernel/seccomp.c30
-rw-r--r--kernel/signal.c103
-rw-r--r--kernel/sysctl.c29
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/tick-sched.c1
-rw-r--r--kernel/trace/bpf_trace.c32
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/trace.c44
-rw-r--r--kernel/trace/trace_clock.c6
-rw-r--r--kernel/watchdog.c34
-rw-r--r--kernel/workqueue.c12
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/Makefile1
-rw-r--r--lib/atomic64.c36
-rw-r--r--lib/crc64.c2
-rw-r--r--lib/debug_locks.c2
-rw-r--r--lib/dynamic_debug.c20
-rw-r--r--lib/locking-selftest.c83
-rw-r--r--lib/percpu-refcount.c6
-rw-r--r--lib/test_kasan.c29
-rw-r--r--mm/debug_vm_pgtable.c4
-rw-r--r--mm/gup.c4
-rw-r--r--mm/huge_memory.c56
-rw-r--r--mm/hugetlb.c157
-rw-r--r--mm/internal.h73
-rw-r--r--mm/ioremap.c6
-rw-r--r--mm/kasan/init.c4
-rw-r--r--mm/kfence/core.c6
-rw-r--r--mm/ksm.c3
-rw-r--r--mm/memory-failure.c119
-rw-r--r--mm/memory.c45
-rw-r--r--mm/migrate.c1
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/page_vma_mapped.c160
-rw-r--r--mm/pgtable-generic.c5
-rw-r--r--mm/rmap.c39
-rw-r--r--mm/shmem.c34
-rw-r--r--mm/shuffle.h4
-rw-r--r--mm/slab_common.c13
-rw-r--r--mm/slub.c47
-rw-r--r--mm/sparse.c13
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c43
-rw-r--r--mm/userfaultfd.c28
-rw-r--r--mm/vmalloc.c41
-rw-r--r--net/Kconfig27
-rw-r--r--net/appletalk/aarp.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c4
-rw-r--r--net/bluetooth/hci_core.c7
-rw-r--r--net/bluetooth/hci_sock.c4
-rw-r--r--net/bluetooth/smp.c6
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_vlan_tunnel.c38
-rw-r--r--net/caif/caif_dev.c13
-rw-r--r--net/caif/caif_usb.c14
-rw-r--r--net/caif/cfcnfg.c16
-rw-r--r--net/caif/cfserl.c5
-rw-r--r--net/can/bcm.c62
-rw-r--r--net/can/isotp.c110
-rw-r--r--net/can/j1939/transport.c54
-rw-r--r--net/can/raw.c62
-rw-r--r--net/ceph/auth.c20
-rw-r--r--net/ceph/auth_none.c5
-rw-r--r--net/ceph/auth_x.c15
-rw-r--r--net/compat.c2
-rw-r--r--net/core/dev.c29
-rw-r--r--net/core/devlink.c4
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c3
-rw-r--r--net/core/neighbour.c1
-rw-r--r--net/core/net_namespace.c20
-rw-r--r--net/core/page_pool.c12
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/core/sock.c24
-rw-r--r--net/dsa/master.c5
-rw-r--r--net/dsa/slave.c12
-rw-r--r--net/dsa/tag_8021q.c2
-rw-r--r--net/ethtool/eeprom.c2
-rw-r--r--net/ethtool/ioctl.c10
-rw-r--r--net/ethtool/stats.c2
-rw-r--r--net/ethtool/strset.c2
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/hsr/hsr_forward.c30
-rw-r--r--net/hsr/hsr_forward.h8
-rw-r--r--net/hsr/hsr_main.h4
-rw-r--r--net/hsr/hsr_slave.c11
-rw-r--r--net/ieee802154/nl-mac.c10
-rw-r--r--net/ieee802154/nl-phy.c4
-rw-r--r--net/ieee802154/nl802154.c9
-rw-r--r--net/ipv4/af_inet.c4
-rw-r--r--net/ipv4/bpf_tcp_ca.c2
-rw-r--r--net/ipv4/cipso_ipv4.c1
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/icmp.c7
-rw-r--r--net/ipv4/igmp.c1
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ping.c12
-rw-r--r--net/ipv4/route.c15
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c22
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/ipv6/sit.c3
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/mac80211/debugfs.c11
-rw-r--r--net/mac80211/ieee80211_i.h38
-rw-r--r--net/mac80211/iface.c30
-rw-r--r--net/mac80211/key.c7
-rw-r--r--net/mac80211/key.h2
-rw-r--r--net/mac80211/main.c7
-rw-r--r--net/mac80211/mlme.c8
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c2
-rw-r--r--net/mac80211/rx.c157
-rw-r--r--net/mac80211/scan.c21
-rw-r--r--net/mac80211/sta_info.c6
-rw-r--r--net/mac80211/sta_info.h33
-rw-r--r--net/mac80211/tx.c52
-rw-r--r--net/mac80211/util.c24
-rw-r--r--net/mac80211/wpa.c13
-rw-r--r--net/mptcp/options.c5
-rw-r--r--net/mptcp/pm_netlink.c8
-rw-r--r--net/mptcp/protocol.c88
-rw-r--r--net/mptcp/protocol.h4
-rw-r--r--net/mptcp/sockopt.c4
-rw-r--r--net/mptcp/subflow.c182
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_proto.c2
-rw-r--r--net/netfilter/nf_flow_table_core.c3
-rw-r--r--net/netfilter/nf_flow_table_offload.c7
-rw-r--r--net/netfilter/nf_synproxy_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c169
-rw-r--r--net/netfilter/nfnetlink_cthelper.c8
-rw-r--r--net/netfilter/nft_ct.c2
-rw-r--r--net/netfilter/nft_set_pipapo.c4
-rw-r--r--net/netfilter/nft_set_pipapo.h2
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c3
-rw-r--r--net/netlink/af_netlink.c6
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/nfc/nci/core.c1
-rw-r--r--net/nfc/nci/hci.c5
-rw-r--r--net/nfc/rawsock.c2
-rw-r--r--net/openvswitch/meter.c8
-rw-r--r--net/packet/af_packet.c51
-rw-r--r--net/qrtr/qrtr.c2
-rw-r--r--net/rds/connection.c23
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/tcp.c4
-rw-r--r--net/rds/tcp.h3
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/sched/act_ct.c31
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/sch_cake.c18
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_fq_pie.c19
-rw-r--r--net/sched/sch_generic.c50
-rw-r--r--net/sched/sch_htb.c8
-rw-r--r--net/sctp/socket.c1
-rw-r--r--net/sctp/sysctl.c2
-rw-r--r--net/smc/smc_ism.c26
-rw-r--r--net/socket.c13
-rw-r--r--net/sunrpc/clnt.c7
-rw-r--r--net/sunrpc/xprt.c40
-rw-r--r--net/sunrpc/xprtmultipath.c2
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c27
-rw-r--r--net/sunrpc/xprtrdma/transport.c12
-rw-r--r--net/sunrpc/xprtrdma/verbs.c18
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/tipc/core.c6
-rw-r--r--net/tipc/core.h10
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/link.c5
-rw-r--r--net/tipc/link.h1
-rw-r--r--net/tipc/msg.c9
-rw-r--r--net/tipc/net.c15
-rw-r--r--net/tipc/node.c12
-rw-r--r--net/tipc/socket.c5
-rw-r--r--net/tipc/udp_media.c2
-rw-r--r--net/tls/tls_device.c60
-rw-r--r--net/tls/tls_device_fallback.c7
-rw-r--r--net/tls/tls_main.c1
-rw-r--r--net/tls/tls_sw.c11
-rw-r--r--net/unix/af_unix.c7
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/core.c13
-rw-r--r--net/wireless/pmsr.c16
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/util.c10
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--samples/bpf/xdpsock_user.c2
-rw-r--r--samples/vfio-mdev/mdpy-fb.c13
-rw-r--r--scripts/Makefile.build5
-rw-r--r--scripts/Makefile.modfinal2
-rwxr-xr-xscripts/atomic/check-atomics.sh1
-rwxr-xr-xscripts/atomic/gen-atomic-instrumented.sh51
-rwxr-xr-xscripts/atomic/gen-atomics.sh1
-rwxr-xr-xscripts/dummy-tools/gcc6
-rwxr-xr-xscripts/jobserver-exec2
-rwxr-xr-xscripts/link-vmlinux.sh2
-rw-r--r--scripts/recordmcount.h15
-rw-r--r--security/keys/trusted-keys/trusted_tpm1.c8
-rw-r--r--security/keys/trusted-keys/trusted_tpm2.c6
-rw-r--r--sound/core/control_led.c33
-rw-r--r--sound/core/seq/seq_timer.c10
-rw-r--r--sound/core/timer.c3
-rw-r--r--sound/firewire/Kconfig4
-rw-r--r--sound/firewire/amdtp-stream-trace.h6
-rw-r--r--sound/firewire/amdtp-stream.c44
-rw-r--r--sound/firewire/bebob/bebob.c2
-rw-r--r--sound/firewire/dice/dice-alesis.c2
-rw-r--r--sound/firewire/dice/dice-pcm.c4
-rw-r--r--sound/firewire/dice/dice-stream.c2
-rw-r--r--sound/firewire/dice/dice-tcelectronic.c4
-rw-r--r--sound/firewire/dice/dice.c24
-rw-r--r--sound/firewire/dice/dice.h3
-rw-r--r--sound/firewire/oxfw/oxfw.c1
-rw-r--r--sound/hda/intel-dsp-config.c4
-rw-r--r--sound/isa/gus/gus_main.c13
-rw-r--r--sound/isa/sb/sb16_main.c10
-rw-r--r--sound/isa/sb/sb8.c10
-rw-r--r--sound/pci/hda/hda_codec.c5
-rw-r--r--sound/pci/hda/hda_generic.c1
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_cirrus.c7
-rw-r--r--sound/pci/hda/patch_realtek.c157
-rw-r--r--sound/pci/intel8x0.c7
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c10
-rw-r--r--sound/soc/amd/raven/acp3x.h1
-rw-r--r--sound/soc/amd/raven/pci-acp3x.c15
-rw-r--r--sound/soc/codecs/ak5558.c2
-rw-r--r--sound/soc/codecs/cs35l32.c3
-rw-r--r--sound/soc/codecs/cs35l33.c1
-rw-r--r--sound/soc/codecs/cs35l34.c3
-rw-r--r--sound/soc/codecs/cs42l42.c3
-rw-r--r--sound/soc/codecs/cs42l56.c7
-rw-r--r--sound/soc/codecs/cs42l73.c3
-rw-r--r--sound/soc/codecs/cs43130.c28
-rw-r--r--sound/soc/codecs/cs53l30.c3
-rw-r--r--sound/soc/codecs/da7219.c5
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c1
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c1
-rw-r--r--sound/soc/codecs/max98088.c13
-rw-r--r--sound/soc/codecs/rt5659.c26
-rw-r--r--sound/soc/codecs/rt5682-sdw.c3
-rw-r--r--sound/soc/codecs/rt711-sdca.c4
-rw-r--r--sound/soc/codecs/sti-sas.c1
-rw-r--r--sound/soc/codecs/tas2562.h14
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c1
-rw-r--r--sound/soc/generic/audio-graph-card.c57
-rw-r--r--sound/soc/generic/simple-card.c168
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c25
-rw-r--r--sound/soc/pxa/pxa-ssp.c16
-rw-r--r--sound/soc/qcom/lpass-cpu.c91
-rw-r--r--sound/soc/qcom/lpass.h4
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/soc/soc-topology.c6
-rw-r--r--sound/soc/sof/intel/hda-dai.c5
-rw-r--r--sound/soc/sof/pm.c1
-rw-r--r--sound/soc/stm/stm32_sai_sub.c5
-rw-r--r--sound/usb/format.c2
-rw-r--r--sound/usb/line6/driver.c4
-rw-r--r--sound/usb/line6/pod.c5
-rw-r--r--sound/usb/line6/variax.c6
-rw-r--r--sound/usb/midi.c11
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/mixer_scarlett_gen2.c81
-rw-r--r--sound/usb/mixer_scarlett_gen2.h2
-rw-r--r--tools/arch/mips/include/uapi/asm/perf_regs.h40
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h7
-rw-r--r--tools/arch/x86/include/asm/msr-index.h6
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h2
-rw-r--r--tools/bootconfig/include/linux/bootconfig.h4
-rw-r--r--tools/bootconfig/main.c1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst2
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool6
-rw-r--r--tools/bpf/bpftool/cgroup.c3
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/build/Makefile.build22
-rw-r--r--tools/include/linux/bits.h2
-rw-r--r--tools/include/linux/const.h8
-rw-r--r--tools/include/uapi/asm-generic/unistd.h3
-rw-r--r--tools/include/uapi/linux/fs.h2
-rw-r--r--tools/include/uapi/linux/in.h3
-rw-r--r--tools/include/uapi/linux/kvm.h5
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/lib/bpf/libbpf.c3
-rw-r--r--tools/lib/bpf/libbpf_internal.h5
-rw-r--r--tools/lib/bpf/xsk.c2
-rw-r--r--tools/objtool/arch/x86/decode.c9
-rw-r--r--tools/objtool/arch/x86/include/arch/special.h1
-rw-r--r--tools/objtool/check.c38
-rw-r--r--tools/objtool/elf.c136
-rw-r--r--tools/objtool/include/objtool/elf.h18
-rw-r--r--tools/objtool/include/objtool/objtool.h3
-rw-r--r--tools/objtool/include/objtool/special.h1
-rw-r--r--tools/objtool/special.c14
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt6
-rw-r--r--tools/perf/Documentation/perf-script.txt7
-rw-r--r--tools/perf/Makefile.config1
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/builtin-buildid-list.c3
-rw-r--r--tools/perf/builtin-record.c6
-rw-r--r--tools/perf/builtin-stat.c13
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/perf.c4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json30
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/floating_point.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json124
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/locks.json4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/marked.json61
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json79
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json133
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json135
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json8
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/translation.json22
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py12
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/pfm.c4
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh4
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h2
-rw-r--r--tools/perf/util/bpf_counter.c10
-rw-r--r--tools/perf/util/dwarf-aux.c8
-rw-r--r--tools/perf/util/env.c1
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c3
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c6
-rw-r--r--tools/perf/util/intel-pt.c6
-rw-r--r--tools/perf/util/machine.c3
-rw-r--r--tools/perf/util/metricgroup.c14
-rw-r--r--tools/perf/util/parse-events.c13
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/perf_api_probe.c10
-rw-r--r--tools/perf/util/perf_api_probe.h1
-rw-r--r--tools/perf/util/pfm.c11
-rw-r--r--tools/perf/util/probe-finder.c3
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/stat-display.c8
-rw-r--r--tools/perf/util/symbol-elf.c1
-rw-r--r--tools/scripts/Makefile.include30
-rw-r--r--tools/testing/nvdimm/test/iomap.c2
-rw-r--r--tools/testing/nvdimm/test/nfit.c42
-rw-r--r--tools/testing/selftests/arm64/bti/test.c1
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c785
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c33
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c56
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_redirect.sh216
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c10
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c15
-rw-r--r--tools/testing/selftests/exec/Makefile6
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore2
-rw-r--r--tools/testing/selftests/futex/functional/Makefile7
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue.c136
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait.c171
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c126
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh6
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile3
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c174
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c32
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h14
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h12
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c330
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h17
-rw-r--r--tools/testing/selftests/kvm/lib/perf_test_util.c6
-rw-r--r--tools/testing/selftests/kvm/lib/rbtree.c1
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c51
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c16
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c18
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c1037
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/get_cpuid_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/get_msr_index_features.c8
-rw-r--r--tools/testing/selftests/nci/.gitignore1
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh25
-rwxr-xr-xtools/testing/selftests/net/icmp.sh74
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh24
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh2
-rwxr-xr-xtools/testing/selftests/net/veth.sh5
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh221
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c14
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c27
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json8
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh1
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config1
-rw-r--r--virt/kvm/kvm_main.c25
-rw-r--r--virt/lib/irqbypass.c16
2134 files changed, 31565 insertions, 57743 deletions
diff --git a/.clang-format b/.clang-format
index c24b147cac01..15d4eaabc6b5 100644
--- a/.clang-format
+++ b/.clang-format
@@ -109,8 +109,8 @@ ForEachMacros:
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- - 'cxl_for_each_cmd'
- 'device_for_each_child_node'
+ - 'displayid_iter_for_each'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
@@ -136,6 +136,7 @@ ForEachMacros:
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
+ - 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
@@ -171,7 +172,6 @@ ForEachMacros:
- 'for_each_dapm_widgets'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- - 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
@@ -179,6 +179,7 @@ ForEachMacros:
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
+ - 'for_each_dtpm_table'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
@@ -215,6 +216,7 @@ ForEachMacros:
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
+ - 'for_each_msi_vector'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
@@ -270,6 +272,12 @@ ForEachMacros:
- 'for_each_prime_number_from'
- 'for_each_process'
- 'for_each_process_thread'
+ - 'for_each_prop_codec_conf'
+ - 'for_each_prop_dai_codec'
+ - 'for_each_prop_dai_cpu'
+ - 'for_each_prop_dlc_codecs'
+ - 'for_each_prop_dlc_cpus'
+ - 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
@@ -430,6 +438,7 @@ ForEachMacros:
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
+ - 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
diff --git a/.mailmap b/.mailmap
index 3e2bff9137e9..db58eedb44f1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -160,6 +160,7 @@ Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
@@ -211,6 +212,8 @@ Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
+Marek Behún <kabel@kernel.org> <marek.behun@nic.cz>
+Marek Behún <kabel@kernel.org> Marek Behun <marek.behun@nic.cz>
Mark Brown <broonie@sirena.org.uk>
Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
@@ -242,6 +245,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
diff --git a/Documentation/ABI/obsolete/sysfs-class-dax b/Documentation/ABI/obsolete/sysfs-class-dax
index 0faf1354cd05..5bcce27458e3 100644
--- a/Documentation/ABI/obsolete/sysfs-class-dax
+++ b/Documentation/ABI/obsolete/sysfs-class-dax
@@ -1,7 +1,7 @@
What: /sys/class/dax/
Date: May, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description: Device DAX is the device-centric analogue of Filesystem
DAX (CONFIG_FS_DAX). It allows memory ranges to be
allocated and mapped without need of an intervening file
diff --git a/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered b/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered
index 0360be39c98e..dae880b1a5d5 100644
--- a/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered
+++ b/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered
@@ -1,4 +1,4 @@
-This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.¬
+This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.
What: /sys/kernel/fadump_registered
Date: Feb 2012
diff --git a/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem b/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem
index 6ce0b129ab12..ca2396edb5f1 100644
--- a/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem
+++ b/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem
@@ -1,4 +1,4 @@
-This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.¬
+This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.
What: /sys/kernel/fadump_release_mem
Date: Feb 2012
diff --git a/Documentation/ABI/removed/sysfs-bus-nfit b/Documentation/ABI/removed/sysfs-bus-nfit
index ae8c1ca53828..277437005def 100644
--- a/Documentation/ABI/removed/sysfs-bus-nfit
+++ b/Documentation/ABI/removed/sysfs-bus-nfit
@@ -1,7 +1,7 @@
What: /sys/bus/nd/devices/regionX/nfit/ecc_unit_size
Date: Aug, 2017
KernelVersion: v4.14 (Removed v4.18)
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Size of a write request to a DIMM that will not incur a
read-modify-write cycle at the memory controller.
diff --git a/Documentation/ABI/testing/sysfs-bus-nfit b/Documentation/ABI/testing/sysfs-bus-nfit
index 63ef0b9ecce7..e7282d184a74 100644
--- a/Documentation/ABI/testing/sysfs-bus-nfit
+++ b/Documentation/ABI/testing/sysfs-bus-nfit
@@ -5,7 +5,7 @@ Interface Table (NFIT)' section in the ACPI specification
What: /sys/bus/nd/devices/nmemX/nfit/serial
Date: Jun, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Serial number of the NVDIMM (non-volatile dual in-line
memory module), assigned by the module vendor.
@@ -14,7 +14,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/handle
Date: Apr, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) The address (given by the _ADR object) of the device on its
parent bus of the NVDIMM device containing the NVDIMM region.
@@ -23,7 +23,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/device
Date: Apr, 2015
KernelVersion: v4.1
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Device id for the NVDIMM, assigned by the module vendor.
@@ -31,7 +31,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/rev_id
Date: Jun, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Revision of the NVDIMM, assigned by the module vendor.
@@ -39,7 +39,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/phys_id
Date: Apr, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Handle (i.e., instance number) for the SMBIOS (system
management BIOS) Memory Device structure describing the NVDIMM
@@ -49,7 +49,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/flags
Date: Jun, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) The flags in the NFIT memory device sub-structure indicate
the state of the data on the nvdimm relative to its energy
@@ -68,7 +68,7 @@ What: /sys/bus/nd/devices/nmemX/nfit/format1
What: /sys/bus/nd/devices/nmemX/nfit/formats
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) The interface codes indicate support for persistent memory
mapped directly into system physical address space and / or a
@@ -84,7 +84,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/vendor
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Vendor id of the NVDIMM.
@@ -92,7 +92,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/dsm_mask
Date: May, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) The bitmask indicates the supported device specific control
functions relative to the NVDIMM command family supported by the
@@ -102,7 +102,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/family
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Displays the NVDIMM family command sets. Values
0, 1, 2 and 3 correspond to NVDIMM_FAMILY_INTEL,
@@ -118,7 +118,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/id
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) ACPI specification 6.2 section 5.2.25.9, defines an
identifier for an NVDIMM, which refelects the id attribute.
@@ -127,7 +127,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/subsystem_vendor
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Sub-system vendor id of the NVDIMM non-volatile memory
subsystem controller.
@@ -136,7 +136,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/subsystem_rev_id
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Sub-system revision id of the NVDIMM non-volatile memory subsystem
controller, assigned by the non-volatile memory subsystem
@@ -146,7 +146,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/nfit/subsystem_device
Date: Apr, 2016
KernelVersion: v4.7
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) Sub-system device id for the NVDIMM non-volatile memory
subsystem controller, assigned by the non-volatile memory
@@ -156,7 +156,7 @@ Description:
What: /sys/bus/nd/devices/ndbusX/nfit/revision
Date: Jun, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) ACPI NFIT table revision number.
@@ -164,7 +164,7 @@ Description:
What: /sys/bus/nd/devices/ndbusX/nfit/scrub
Date: Sep, 2016
KernelVersion: v4.9
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RW) This shows the number of full Address Range Scrubs (ARS)
that have been completed since driver load time. Userspace can
@@ -177,7 +177,7 @@ Description:
What: /sys/bus/nd/devices/ndbusX/nfit/hw_error_scrub
Date: Sep, 2016
KernelVersion: v4.9
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RW) Provides a way to toggle the behavior between just adding
the address (cache line) where the MCE happened to the poison
@@ -196,7 +196,7 @@ Description:
What: /sys/bus/nd/devices/ndbusX/nfit/dsm_mask
Date: Jun, 2017
KernelVersion: v4.13
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) The bitmask indicates the supported bus specific control
functions. See the section named 'NVDIMM Root Device _DSMs' in
@@ -205,7 +205,7 @@ Description:
What: /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle
Date: Apr, 2020
KernelVersion: v5.8
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RW) The Intel platform implementation of firmware activate
support exposes an option let the platform force idle devices in
@@ -225,7 +225,7 @@ Description:
What: /sys/bus/nd/devices/regionX/nfit/range_index
Date: Jun, 2015
KernelVersion: v4.2
-Contact: linux-nvdimm@lists.01.org
+Contact: nvdimm@lists.linux.dev
Description:
(RO) A unique number provided by the BIOS to identify an address
range. Used by NVDIMM Region Mapping Structure to uniquely refer
diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
index 8316c33862a0..92e2db0e2d3d 100644
--- a/Documentation/ABI/testing/sysfs-bus-papr-pmem
+++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem
@@ -1,7 +1,7 @@
What: /sys/bus/nd/devices/nmemX/papr/flags
Date: Apr, 2020
KernelVersion: v5.8
-Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
Description:
(RO) Report flags indicating various states of a
papr-pmem NVDIMM device. Each flag maps to a one or
@@ -36,7 +36,7 @@ Description:
What: /sys/bus/nd/devices/nmemX/papr/perf_stats
Date: May, 2020
KernelVersion: v5.9
-Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
Description:
(RO) Report various performance stats related to papr-scm NVDIMM
device. Each stat is reported on a new line with each line
diff --git a/Documentation/ABI/testing/sysfs-module b/Documentation/ABI/testing/sysfs-module
index a485434d2a0f..88bddf192ceb 100644
--- a/Documentation/ABI/testing/sysfs-module
+++ b/Documentation/ABI/testing/sysfs-module
@@ -37,13 +37,13 @@ Description: Maximum time allowed for periodic transfers per microframe (μs)
What: /sys/module/*/{coresize,initsize}
Date: Jan 2012
-KernelVersion:»·3.3
+KernelVersion: 3.3
Contact: Kay Sievers <kay.sievers@vrfy.org>
Description: Module size in bytes.
What: /sys/module/*/taint
Date: Jan 2012
-KernelVersion:»·3.3
+KernelVersion: 3.3
Contact: Kay Sievers <kay.sievers@vrfy.org>
Description: Module taint flags:
== =====================
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 1d56a6b73a4e..68b21395a743 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -483,10 +483,11 @@ modprobe
========
The full path to the usermode helper for autoloading kernel modules,
-by default "/sbin/modprobe". This binary is executed when the kernel
-requests a module. For example, if userspace passes an unknown
-filesystem type to mount(), then the kernel will automatically request
-the corresponding filesystem module by executing this usermode helper.
+by default ``CONFIG_MODPROBE_PATH``, which in turn defaults to
+"/sbin/modprobe". This binary is executed when the kernel requests a
+module. For example, if userspace passes an unknown filesystem type
+to mount(), then the kernel will automatically request the
+corresponding filesystem module by executing this usermode helper.
This usermode helper should insert the needed module into the kernel.
This sysctl only affects module autoloading. It has no effect on the
@@ -1457,11 +1458,22 @@ unprivileged_bpf_disabled
=========================
Writing 1 to this entry will disable unprivileged calls to ``bpf()``;
-once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` will return
-``-EPERM``.
+once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` or ``CAP_BPF``
+will return ``-EPERM``. Once set to 1, this can't be cleared from the
+running kernel anymore.
-Once set, this can't be cleared.
+Writing 2 to this entry will also disable unprivileged calls to ``bpf()``,
+however, an admin can still change this setting later on, if needed, by
+writing 0 or 1 to this entry.
+If ``BPF_UNPRIV_DEFAULT_OFF`` is enabled in the kernel config, then this
+entry will default to 2 instead of 0.
+
+= =============================================================
+0 Unprivileged calls to ``bpf()`` are enabled
+1 Unprivileged calls to ``bpf()`` are disabled without recovery
+2 Unprivileged calls to ``bpf()`` are disabled
+= =============================================================
watchdog
========
diff --git a/Documentation/block/data-integrity.rst b/Documentation/block/data-integrity.rst
index 4f2452a95c43..07a97aa26668 100644
--- a/Documentation/block/data-integrity.rst
+++ b/Documentation/block/data-integrity.rst
@@ -1,4 +1,4 @@
-==============
+==============
Data Integrity
==============
diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst
index 70500b189cc8..5845960ca382 100644
--- a/Documentation/cdrom/cdrom-standard.rst
+++ b/Documentation/cdrom/cdrom-standard.rst
@@ -146,18 +146,18 @@ with the kernel as a block device by registering the following general
*struct file_operations*::
struct file_operations cdrom_fops = {
- NULL, /∗ lseek ∗/
- block _read , /∗ read—general block-dev read ∗/
- block _write, /∗ write—general block-dev write ∗/
- NULL, /∗ readdir ∗/
- NULL, /∗ select ∗/
- cdrom_ioctl, /∗ ioctl ∗/
- NULL, /∗ mmap ∗/
- cdrom_open, /∗ open ∗/
- cdrom_release, /∗ release ∗/
- NULL, /∗ fsync ∗/
- NULL, /∗ fasync ∗/
- NULL /∗ revalidate ∗/
+ NULL, /* lseek */
+ block _read , /* read--general block-dev read */
+ block _write, /* write--general block-dev write */
+ NULL, /* readdir */
+ NULL, /* select */
+ cdrom_ioctl, /* ioctl */
+ NULL, /* mmap */
+ cdrom_open, /* open */
+ cdrom_release, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ NULL /* revalidate */
};
Every active CD-ROM device shares this *struct*. The routines
@@ -250,12 +250,12 @@ The drive-specific, minor-like information that is registered with
`cdrom.c`, currently contains the following fields::
struct cdrom_device_info {
- const struct cdrom_device_ops * ops; /* device operations for this major */
+ const struct cdrom_device_ops * ops; /* device operations for this major */
struct list_head list; /* linked list of all device_info */
struct gendisk * disk; /* matching block layer disk */
void * handle; /* driver-dependent data */
- int mask; /* mask of capability: disables them */
+ int mask; /* mask of capability: disables them */
int speed; /* maximum speed for reading data */
int capacity; /* number of discs in a jukebox */
@@ -569,7 +569,7 @@ the *CDC_CLOSE_TRAY* bit in *mask*.
In the file `cdrom.c` you will encounter many constructions of the type::
- if (cdo->capability & ∼cdi->mask & CDC _⟨capability⟩) ...
+ if (cdo->capability & ~cdi->mask & CDC _<capability>) ...
There is no *ioctl* to set the mask... The reason is that
I think it is better to control the **behavior** rather than the
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
index c268debe5b8d..28675b0b80f1 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
@@ -60,7 +60,6 @@ properties:
maxItems: 2
idt,xtal-load-femtofarads:
- $ref: /schemas/types.yaml#/definitions/uint32
minimum: 9000
maximum: 22760
description: Optional load capacitor for XTAL1 and XTAL2
@@ -84,7 +83,6 @@ patternProperties:
enum: [ 1800000, 2500000, 3300000 ]
idt,slew-percent:
description: The Slew rate control for CMOS single-ended.
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [ 80, 85, 90, 100 ]
required:
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 32509b98142e..92b49bc37939 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -149,6 +149,17 @@ properties:
maxItems: 6
$ref: /schemas/types.yaml#/definitions/uint32-array
+ sink-vdos-v1:
+ description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
+ providing additional information corresponding to the product, the detailed bit
+ definitions and the order of each VDO can be found in
+ "USB Power Delivery Specification Revision 2.0, Version 1.3" chapter 6.4.4.3.1 Discover
+ Identity. User can specify the VDO array via VDO_IDH/_CERT/_PRODUCT/_CABLE/_AMA defined in
+ dt-bindings/usb/pd.h.
+ minItems: 3
+ maxItems: 6
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
op-sink-microwatt:
description: Sink required operating power in microwatt, if source can't
offer the power, Capability Mismatch is set. Required for power sink and
@@ -207,6 +218,10 @@ properties:
SNK_READY for non-pd link.
type: boolean
+dependencies:
+ sink-vdos-v1: [ 'sink-vdos' ]
+ sink-vdos: [ 'sink-vdos-v1' ]
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/hwmon/lm75.yaml b/Documentation/devicetree/bindings/hwmon/lm75.yaml
index 96eed5cc7841..72980d083c21 100644
--- a/Documentation/devicetree/bindings/hwmon/lm75.yaml
+++ b/Documentation/devicetree/bindings/hwmon/lm75.yaml
@@ -30,6 +30,7 @@ properties:
- st,stds75
- st,stlm75
- microchip,tcn75
+ - ti,tmp1075
- ti,tmp100
- ti,tmp101
- ti,tmp105
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
index 33ee575bb09d..926be9a29044 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
@@ -49,7 +49,7 @@ examples:
#size-cells = <0>;
adc@48 {
- comatible = "ti,ads7828";
+ compatible = "ti,ads7828";
reg = <0x48>;
vref-supply = <&vref>;
ti,differential-input;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
index 7b553d559c83..98c6fcf7bf26 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
@@ -46,6 +46,13 @@ properties:
description: |
I2C bus timeout in microseconds
+ fsl,i2c-erratum-a004447:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ Indicates the presence of QorIQ erratum A-004447, which
+ says that the standard i2c recovery mechanism does not
+ work and an alternate implementation is needed.
+
required:
- compatible
- reg
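For illustration, a hypothetical controller node on affected silicon (compatible, address and interrupt are placeholders) only needs the new flag added:

    i2c@3000 {
        compatible = "fsl-i2c";
        reg = <0x3000 0x100>;
        interrupts = <43 2>;
        /* this SoC suffers from erratum A-004447, use the alternate recovery */
        fsl,i2c-erratum-a004447;
    };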
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 6f2398cdc82d..1e7894e524f9 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -102,7 +102,6 @@ patternProperties:
st,adc-channel-names:
description: List of single-ended channel names.
- $ref: /schemas/types.yaml#/definitions/string-array
st,filter-order:
description: |
diff --git a/Documentation/devicetree/bindings/input/input.yaml b/Documentation/devicetree/bindings/input/input.yaml
index 74244d21d2b3..d41d8743aad4 100644
--- a/Documentation/devicetree/bindings/input/input.yaml
+++ b/Documentation/devicetree/bindings/input/input.yaml
@@ -38,6 +38,5 @@ properties:
Duration in seconds which the key should be kept pressed for device to
reset automatically. Device with key pressed reset feature can specify
this property.
- $ref: /schemas/types.yaml#/definitions/uint32
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index cb6498108b78..36c955965d90 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -92,7 +92,6 @@ properties:
this interconnect to send RPMh commands.
qcom,bcm-voter-names:
- $ref: /schemas/types.yaml#/definitions/string-array
description: |
Names for each of the qcom,bcm-voters specified.
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
index ccebce597f37..a555d94084b7 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
@@ -4,8 +4,8 @@ This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
In these SoCs it's possible to control LEDs both as GPIOs or by hardware.
However, on some devices there are Serial LEDs (LEDs connected to a 74x164
controller), which can either be controlled by software (exporting the 74x164
-as spi-gpio. See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
-by hardware using this driver.
+as spi-gpio. See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml),
+or by hardware using this driver.
Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
exporting the 74x164 as spi-gpio prevents those LEDs to be hardware
controlled, so the only chance to keep them working is by using this driver.
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
index da5708e7b43b..6e51c6b91ee5 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
@@ -3,7 +3,7 @@ LEDs connected to Broadcom BCM6358 controller
This controller is present on BCM6358 and BCM6368.
In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller),
which can either be controlled by software (exporting the 74x164 as spi-gpio.
-See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
+See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml), or
by hardware using this driver.
Required properties:
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.yaml b/Documentation/devicetree/bindings/media/renesas,drif.yaml
index f1bdaeab4053..9cd56ff2c316 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,drif.yaml
@@ -67,9 +67,7 @@ properties:
maxItems: 1
clock-names:
- maxItems: 1
- items:
- - const: fck
+ const: fck
resets:
maxItems: 1
@@ -99,32 +97,26 @@ properties:
Indicates that the channel acts as primary among the bonded channels.
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
+ unevaluatedProperties: false
description:
- Child port node corresponding to the data input, in accordance with the
- video interface bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- The port node must contain at least one endpoint.
+ Child port node corresponding to the data input. The port node must
+ contain at least one endpoint.
properties:
endpoint:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
- remote-endpoint:
- description:
- A phandle to the remote tuner endpoint subnode in remote node
- port.
-
sync-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
description:
Indicates sync signal polarity, 0/1 for low/high respectively.
This property maps to SYNCAC bit in the hardware manual. The
default is 1 (active high).
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index 2661775a3825..99a84b69a29f 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -21,6 +21,7 @@ Required properties:
compatible:
"mediatek,mt6323" for PMIC MT6323
"mediatek,mt6358" for PMIC MT6358
+ "mediatek,mt6359" for PMIC MT6359
"mediatek,mt6397" for PMIC MT6397
Optional subnodes:
diff --git a/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml
index 6f569fbfa134..2f63f2cdeb71 100644
--- a/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/brcm,iproc-sdhci.yaml
@@ -21,6 +21,7 @@ properties:
- brcm,bcm2711-emmc2
- brcm,sdhci-iproc-cygnus
- brcm,sdhci-iproc
+ - brcm,bcm7211a0-sdhci
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
index 04ba8b7fc054..546480f41141 100644
--- a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
@@ -19,6 +19,7 @@ properties:
- ingenic,jz4740-mmc
- ingenic,jz4725b-mmc
- ingenic,jz4760-mmc
+ - ingenic,jz4775-mmc
- ingenic,jz4780-mmc
- ingenic,x1000-mmc
- items:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index e141330c1114..25ac8e200970 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -220,6 +220,11 @@ properties:
description:
eMMC HS400 enhanced strobe mode is supported
+ no-mmc-hs400:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ All eMMC HS400 modes are not supported.
+
dsr:
description:
Value the card Driver Stage Register (DSR) should be programmed
@@ -358,22 +363,6 @@ additionalProperties: true
examples:
- |
- mmc@ab000000 {
- compatible = "sdhci";
- reg = <0xab000000 0x200>;
- interrupts = <23>;
- bus-width = <4>;
- cd-gpios = <&gpio 69 0>;
- cd-inverted;
- wp-gpios = <&gpio 70 0>;
- max-frequency = <50000000>;
- keep-power-in-suspend;
- wakeup-source;
- mmc-pwrseq = <&sdhci0_pwrseq>;
- clk-phase-sd-hs = <63>, <72>;
- };
-
- - |
mmc3: mmc@1c12000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -385,9 +374,9 @@ examples:
non-removable;
mmc-pwrseq = <&sdhci0_pwrseq>;
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
- compatible = "brcm,bcm43xx-fmac";
+ compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&pio>;
interrupts = <10 8>;
interrupt-names = "host-wake";
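For illustration, a hypothetical eMMC node using the new flag (controller compatible and address are placeholders):

    mmc@fe330000 {
        compatible = "vendor,soc-sdhci";
        reg = <0xfe330000 0x10000>;
        bus-width = <8>;
        non-removable;
        mmc-hs200-1_8v;
        /* board routing cannot run HS400, mask it even if the controller claims it */
        no-mmc-hs400;
    };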
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
deleted file mode 100644
index 291532ac0446..000000000000
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Renesas Multi Media Card Interface (MMCIF) Controller
-
-This file documents differences between the core properties in mmc.txt
-and the properties used by the MMCIF device.
-
-
-Required properties:
-
-- compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
- fallback. Examples with <soctype> are:
- - "renesas,mmcif-r7s72100" for the MMCIF found in r7s72100 SoCs
- - "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
- - "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
- - "renesas,mmcif-r8a7742" for the MMCIF found in r8a7742 SoCs
- - "renesas,mmcif-r8a7743" for the MMCIF found in r8a7743 SoCs
- - "renesas,mmcif-r8a7744" for the MMCIF found in r8a7744 SoCs
- - "renesas,mmcif-r8a7745" for the MMCIF found in r8a7745 SoCs
- - "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
- - "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
- - "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
- - "renesas,mmcif-r8a7793" for the MMCIF found in r8a7793 SoCs
- - "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
- - "renesas,mmcif-sh73a0" for the MMCIF found in sh73a0 SoCs
-
-- interrupts: Some SoCs have only 1 shared interrupt, while others have either
- 2 or 3 individual interrupts (error, int, card detect). Below is the number
- of interrupts for each SoC:
- 1: r8a73a4, r8a7742, r8a7743, r8a7744, r8a7745, r8a7778, r8a7790, r8a7791,
- r8a7793, r8a7794
- 2: r8a7740, sh73a0
- 3: r7s72100
-
-- clocks: reference to the functional clock
-
-- dmas: reference to the DMA channels, one per channel name listed in the
- dma-names property.
-- dma-names: must contain "tx" for the transmit DMA channel and "rx" for the
- receive DMA channel.
-- max-frequency: Maximum operating clock frequency, driver uses default clock
- frequency if it is not set.
-
-
-Example: R8A7790 (R-Car H2) MMCIF0
-
- mmcif0: mmc@ee200000 {
- compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
- reg = <0 0xee200000 0 0x80>;
- interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
- dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
- dma-names = "tx", "rx";
- max-frequency = <97500000>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml b/Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml
new file mode 100644
index 000000000000..c36ba561c387
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/renesas,mmcif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Multi Media Card Interface (MMCIF) Controller
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+allOf:
+ - $ref: "mmc-controller.yaml"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,mmcif-r7s72100 # RZ/A1H
+ - renesas,mmcif-r8a73a4 # R-Mobile APE6
+ - renesas,mmcif-r8a7740 # R-Mobile A1
+ - renesas,mmcif-r8a7742 # RZ/G1H
+ - renesas,mmcif-r8a7743 # RZ/G1M
+ - renesas,mmcif-r8a7744 # RZ/G1N
+ - renesas,mmcif-r8a7745 # RZ/G1E
+ - renesas,mmcif-r8a7778 # R-Car M1A
+ - renesas,mmcif-r8a7790 # R-Car H2
+ - renesas,mmcif-r8a7791 # R-Car M2-W
+ - renesas,mmcif-r8a7793 # R-Car M2-N
+ - renesas,mmcif-r8a7794 # R-Car E2
+ - renesas,mmcif-sh73a0 # SH-Mobile AG5
+ - const: renesas,sh-mmcif
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+ description:
+ Must contain a list of pairs of references to DMA specifiers, one for
+ transmission, and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+ max-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,mmcif-r7s72100
+then:
+ properties:
+ interrupts:
+ items:
+ - description: Error interrupt
+ - description: Normal operation interrupt
+ - description: Card detection interrupt
+else:
+ if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,mmcif-r8a7740
+ - renesas,mmcif-sh73a0
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Error interrupt
+ - description: Normal operation interrupt
+ else:
+ if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,mmcif-r8a73a4
+ - renesas,mmcif-r8a7778
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ required:
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ mmcif0: mmc@ee200000 {
+ compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
+ reg = <0xee200000 0x80>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 315>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 315>;
+ dmas = <&dmac0 0xd1>, <&dmac0 0xd2>, <&dmac1 0xd1>, <&dmac1 0xd2>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <97500000>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
index 3762f1c8de96..54fb59820d2b 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
@@ -29,21 +29,15 @@ properties:
- const: rockchip,rk3288-dw-mshc
- items:
- enum:
- # for Rockchip PX30
- rockchip,px30-dw-mshc
- # for Rockchip RK3036
+ - rockchip,rk1808-dw-mshc
- rockchip,rk3036-dw-mshc
- # for Rockchip RK322x
- rockchip,rk3228-dw-mshc
- # for Rockchip RK3308
- rockchip,rk3308-dw-mshc
- # for Rockchip RK3328
- rockchip,rk3328-dw-mshc
- # for Rockchip RK3368
- rockchip,rk3368-dw-mshc
- # for Rockchip RK3399
- rockchip,rk3399-dw-mshc
- # for Rockchip RV1108
+ - rockchip,rk3568-dw-mshc
- rockchip,rv1108-dw-mshc
- const: rockchip,rk3288-dw-mshc
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
index 3a79e39253d2..29399e88ac53 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-am654.yaml
@@ -19,7 +19,6 @@ properties:
- const: ti,am654-sdhci-5.1
- const: ti,j721e-sdhci-8bit
- const: ti,j721e-sdhci-4bit
- - const: ti,j721e-sdhci-4bit
- const: ti,am64-sdhci-8bit
- const: ti,am64-sdhci-4bit
- items:
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index 7443490d4cc6..5fe6d3dceb08 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -105,7 +105,6 @@ properties:
- description: Whether the IPA clock is enabled (if valid)
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string-array
description: The names of the state bits used for SMP2P output
items:
- const: ipa-clock-enabled-valid
diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml
index 8ce5ed8a58dd..c101a1ec846e 100644
--- a/Documentation/devicetree/bindings/net/renesas,ether.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml
@@ -10,7 +10,7 @@ allOf:
- $ref: ethernet-controller.yaml#
maintainers:
- - Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+ - Sergei Shtylyov <sergei.shtylyov@gmail.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
index d479ad977e24..b6791702bcfc 100644
--- a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
@@ -9,7 +9,6 @@ Required properties:
"mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
"mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
- "mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
- reg: Should contain registers location and length
= Data cells =
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
index 01dcd14e7b2a..320a232c7208 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
@@ -118,7 +118,7 @@ patternProperties:
description:
Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC,
EXTERNAL_SSC or INTERNAL_SSC.
- Refer include/dt-bindings/phy/phy-cadence-torrent.h for the constants to be used.
+ Refer include/dt-bindings/phy/phy-cadence.h for the constants to be used.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
default: 0
diff --git a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
index db1aa238cda5..b62c2431f94e 100644
--- a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
@@ -20,7 +20,7 @@ properties:
maxItems: 1
phys:
- $ref: /schemas/types.yaml#/definitions/phandle
+ maxItems: 1
description: phandle to the USB phy
monitored-battery:
diff --git a/Documentation/devicetree/bindings/regulator/max8893.yaml b/Documentation/devicetree/bindings/regulator/max8893.yaml
new file mode 100644
index 000000000000..2b5e977bf409
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max8893.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/max8893.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Regulator driver for MAX8893 PMIC from Maxim Integrated.
+
+maintainers:
+ - Sergey Larin <cerg2010cerg2010@mail.ru>
+
+description: |
+ The device has 5 LDO regulators and a single BUCK regulator.
+ Programming is done through I2C bus.
+
+properties:
+ compatible:
+ const: maxim,max8893
+
+ reg:
+ maxItems: 1
+
+ regulators:
+ type: object
+
+ patternProperties:
+ "^(ldo[1-5]|buck)$":
+ $ref: "regulator.yaml#"
+
+ additionalProperties: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@3e {
+ compatible = "maxim,max8893";
+ reg = <0x3e>;
+
+ regulators {
+ /* Front camera - s5k6aafx, back - m5mo */
+ /* Numbers used to indicate the sequence */
+ front_1_back_1: buck {
+ regulator-name = "cam_isp_core_1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ front_4_back_5: ldo1 {
+ regulator-name = "vt_io_1v8,cam_isp_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ front_3_back_4: ldo2 {
+ regulator-name = "vt_core_1v5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ front_5_back_6: ldo3 {
+ regulator-name = "vt_cam_1v8,vt_sensor_io_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo4 {
+ /* not used */
+ };
+
+ back_7: ldo5 {
+ regulator-name = "cam_sensor_io_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml
new file mode 100644
index 000000000000..8cc413eb482d
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6359-regulator.yaml
@@ -0,0 +1,385 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mt6359-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MT6359 Regulator from MediaTek Integrated
+
+maintainers:
+ - Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>
+
+description: |
+ List of regulators provided by this controller. It is named
+ according to its regulator type, buck_<name> and ldo_<name>.
+ The MT6359 regulators node should be a sub-node of the MT6397 MFD node.
+
+patternProperties:
+ "^buck_v(s1|gpu11|modem|pu|core|s2|pa|proc2|proc1|core_sshub)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(s1|gpu11|modem|pu|core|s2|pa|proc2|proc1|core_sshub)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(ibr|rf12|usb|camio|efuse|xo22)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(ibr|rf12|usb|camio|efuse|xo22)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(rfck|emc|a12|a09|ufs|bbck)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(rfck|emc|a12|a09|ufs|bbck)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_vcn(18|13|33_1_bt|13_1_wifi|33_2_bt|33_2_wifi)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vcn(18|13|33_1_bt|13_1_wifi|33_2_bt|33_2_wifi)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_vsram_(proc2|others|md|proc1|others_sshub)$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vsram_(proc2|others|md|proc1|others_sshub)$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(fe|bif|io)28$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(fe|bif|io)28$"
+
+ unevaluatedProperties: false
+
+ "^ldo_v(aud|io|aux|rf|m)18$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^v(aud|io|aux|rf|m)18$"
+
+ unevaluatedProperties: false
+
+ "^ldo_vsim[12]$":
+ type: object
+ $ref: "regulator.yaml#"
+
+ properties:
+ regulator-name:
+ pattern: "^vsim[12]$"
+
+ required:
+ - regulator-name
+
+ unevaluatedProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+ pmic {
+ regulators {
+ mt6359_vs1_buck_reg: buck_vs1 {
+ regulator-name = "vs1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+ mt6359_vgpu11_buck_reg: buck_vgpu11 {
+ regulator-name = "vgpu11";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vmodem_buck_reg: buck_vmodem {
+ regulator-name = "vmodem";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-ramp-delay = <10760>;
+ regulator-enable-ramp-delay = <200>;
+ };
+ mt6359_vpu_buck_reg: buck_vpu {
+ regulator-name = "vpu";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vcore_buck_reg: buck_vcore {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vs2_buck_reg: buck_vs2 {
+ regulator-name = "vs2";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1600000>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+ mt6359_vpa_buck_reg: buck_vpa {
+ regulator-name = "vpa";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <3650000>;
+ regulator-enable-ramp-delay = <300>;
+ };
+ mt6359_vproc2_buck_reg: buck_vproc2 {
+ regulator-name = "vproc2";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vproc1_buck_reg: buck_vproc1 {
+ regulator-name = "vproc1";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1 2>;
+ };
+ mt6359_vcore_sshub_buck_reg: buck_vcore_sshub {
+ regulator-name = "vcore_sshub";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ };
+ mt6359_vgpu11_sshub_buck_reg: buck_vgpu11_sshub {
+ regulator-name = "vgpu11_sshub";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ };
+ mt6359_vaud18_ldo_reg: ldo_vaud18 {
+ regulator-name = "vaud18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vsim1_ldo_reg: ldo_vsim1 {
+ regulator-name = "vsim1";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ };
+ mt6359_vibr_ldo_reg: ldo_vibr {
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ mt6359_vrf12_ldo_reg: ldo_vrf12 {
+ regulator-name = "vrf12";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ };
+ mt6359_vusb_ldo_reg: ldo_vusb {
+ regulator-name = "vusb";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <960>;
+ regulator-always-on;
+ };
+ mt6359_vsram_proc2_ldo_reg: ldo_vsram_proc2 {
+ regulator-name = "vsram_proc2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+ mt6359_vio18_ldo_reg: ldo_vio18 {
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-enable-ramp-delay = <960>;
+ regulator-always-on;
+ };
+ mt6359_vcamio_ldo_reg: ldo_vcamio {
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ };
+ mt6359_vcn18_ldo_reg: ldo_vcn18 {
+ regulator-name = "vcn18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vfe28_ldo_reg: ldo_vfe28 {
+ regulator-name = "vfe28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+ mt6359_vcn13_ldo_reg: ldo_vcn13 {
+ regulator-name = "vcn13";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1300000>;
+ };
+ mt6359_vcn33_1_bt_ldo_reg: ldo_vcn33_1_bt {
+ regulator-name = "vcn33_1_bt";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_vcn33_1_wifi_ldo_reg: ldo_vcn33_1_wifi {
+ regulator-name = "vcn33_1_wifi";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_vaux18_ldo_reg: ldo_vaux18 {
+ regulator-name = "vaux18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+ mt6359_vsram_others_ldo_reg: ldo_vsram_others {
+ regulator-name = "vsram_others";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <5000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vefuse_ldo_reg: ldo_vefuse {
+ regulator-name = "vefuse";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <2000000>;
+ };
+ mt6359_vxo22_ldo_reg: ldo_vxo22 {
+ regulator-name = "vxo22";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-always-on;
+ };
+ mt6359_vrfck_ldo_reg: ldo_vrfck {
+ regulator-name = "vrfck";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1700000>;
+ };
+ mt6359_vrfck_1_ldo_reg: ldo_vrfck_1 {
+ regulator-name = "vrfck";
+ regulator-min-microvolt = <1240000>;
+ regulator-max-microvolt = <1600000>;
+ };
+ mt6359_vbif28_ldo_reg: ldo_vbif28 {
+ regulator-name = "vbif28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vio28_ldo_reg: ldo_vio28 {
+ regulator-name = "vio28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ mt6359_vemc_ldo_reg: ldo_vemc {
+ regulator-name = "vemc";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ mt6359_vemc_1_ldo_reg: ldo_vemc_1 {
+ regulator-name = "vemc";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ mt6359_vcn33_2_bt_ldo_reg: ldo_vcn33_2_bt {
+ regulator-name = "vcn33_2_bt";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_vcn33_2_wifi_ldo_reg: ldo_vcn33_2_wifi {
+ regulator-name = "vcn33_2_wifi";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3500000>;
+ };
+ mt6359_va12_ldo_reg: ldo_va12 {
+ regulator-name = "va12";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ };
+ mt6359_va09_ldo_reg: ldo_va09 {
+ regulator-name = "va09";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ mt6359_vrf18_ldo_reg: ldo_vrf18 {
+ regulator-name = "vrf18";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1810000>;
+ };
+ mt6359_vsram_md_ldo_reg: ldo_vsram_md {
+ regulator-name = "vsram_md";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <10760>;
+ regulator-enable-ramp-delay = <240>;
+ };
+ mt6359_vufs_ldo_reg: ldo_vufs {
+ regulator-name = "vufs";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ };
+ mt6359_vm18_ldo_reg: ldo_vm18 {
+ regulator-name = "vm18";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-always-on;
+ };
+ mt6359_vbbck_ldo_reg: ldo_vbbck {
+ regulator-name = "vbbck";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ mt6359_vsram_proc1_ldo_reg: ldo_vsram_proc1 {
+ regulator-name = "vsram_proc1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <7500>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+ mt6359_vsim2_ldo_reg: ldo_vsim2 {
+ regulator-name = "vsim2";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ };
+ mt6359_vsram_others_sshub_ldo: ldo_vsram_others_sshub {
+ regulator-name = "vsram_others_sshub";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
index e561a5b941e4..34de38377aa6 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
@@ -33,6 +33,9 @@ description: |
The names used for regulator nodes must match those supported by a given
PMIC. Supported regulator node names are
+ For PM6150, smps1 - smps5, ldo1 - ldo19
+ For PM6150L, smps1 - smps8, ldo1 - ldo11, bob
+ For PM7325, smps1 - smps8, ldo1 - ldo19
For PM8005, smps1 - smps4
For PM8009, smps1 - smps2, ldo1 - ldo7
For PM8150, smps1 - smps10, ldo1 - ldo18
@@ -41,15 +44,15 @@ description: |
For PM8350C, smps1 - smps10, ldo1 - ldo13, bob
For PM8998, smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
For PMI8998, bob
- For PM6150, smps1 - smps5, ldo1 - ldo19
- For PM6150L, smps1 - smps8, ldo1 - ldo11, bob
- For PMX55, smps1 - smps7, ldo1 - ldo16
- For PM7325, smps1 - smps8, ldo1 - ldo19
For PMR735A, smps1 - smps3, ldo1 - ldo7
+ For PMX55, smps1 - smps7, ldo1 - ldo16
properties:
compatible:
enum:
+ - qcom,pm6150-rpmh-regulators
+ - qcom,pm6150l-rpmh-regulators
+ - qcom,pm7325-rpmh-regulators
- qcom,pm8005-rpmh-regulators
- qcom,pm8009-rpmh-regulators
- qcom,pm8009-1-rpmh-regulators
@@ -59,11 +62,9 @@ properties:
- qcom,pm8350c-rpmh-regulators
- qcom,pm8998-rpmh-regulators
- qcom,pmi8998-rpmh-regulators
- - qcom,pm6150-rpmh-regulators
- - qcom,pm6150l-rpmh-regulators
- - qcom,pmx55-rpmh-regulators
- - qcom,pm7325-rpmh-regulators
+ - qcom,pmm8155au-rpmh-regulators
- qcom,pmr735a-rpmh-regulators
+ - qcom,pmx55-rpmh-regulators
qcom,pmic-id:
description: |
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
index a35c6cb9bf97..83b53579f463 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
@@ -24,6 +24,10 @@ description:
For mp5496, s2
+ For pm8226, s1, s2, s3, s4, s5, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10,
+ l11, l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, l25,
+ l26, l27, l28, lvs1
+
For pm8841, s1, s2, s3, s4, s5, s6, s7, s8
For pm8916, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
@@ -68,6 +72,7 @@ properties:
compatible:
enum:
- qcom,rpm-mp5496-regulators
+ - qcom,rpm-pm8226-regulators
- qcom,rpm-pm8841-regulators
- qcom,rpm-pm8916-regulators
- qcom,rpm-pm8941-regulators
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 6d0bc9cd4040..a6ae9ecae5cc 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -117,6 +117,88 @@ properties:
description: Enable over current protection.
type: boolean
+ regulator-oc-protection-microamp:
+ description: Set over current protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted.
+
+ regulator-oc-error-microamp:
+ description: Set over current error limit. This is a limit where part of
+ the hardware probably is malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted.
+
+ regulator-oc-warn-microamp:
+ description: Set over current warning limit. This is a limit where hardware
+ is assumed still to be functional but approaching limit where it gets
+ damaged. Recovery actions should be initiated. Zero can be passed to
+ disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted.
+
+ regulator-ov-protection-microvolt:
+ description: Set over voltage protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted. Limit is given as microvolt offset from
+ voltage set to regulator.
+
+ regulator-ov-error-microvolt:
+ description: Set over voltage error limit. This is a limit where part of
+ the hardware probably is malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted. Limit
+ is given as microvolt offset from voltage set to regulator.
+
+ regulator-ov-warn-microvolt:
+ description: Set over voltage warning limit. This is a limit where hardware
+ is assumed still to be functional but approaching limit where it gets
+ damaged. Recovery actions should be initiated. Zero can be passed to
+ disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted. Limit is given as microvolt
+ offset from voltage set to regulator.
+
+ regulator-uv-protection-microvolt:
+ description: Set under voltage protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted. Limit is given as microvolt offset from
+ voltage set to regulator.
+
+ regulator-uv-error-microvolt:
+ description: Set under voltage error limit. This is a limit where part of
+ the hardware probably is malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted. Limit
+ is given as microvolt offset from voltage set to regulator.
+
+ regulator-uv-warn-microvolt:
+ description: Set under voltage warning limit. This is a limit where
+ hardware is assumed still to be functional but approaching limit where
+ it gets damaged. Recovery actions should be initiated. Zero can be passed
+ to disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted. Limit is given as microvolt
+ offset from voltage set to regulator.
+
+ regulator-temp-protection-kelvin:
+ description: Set over temperature protection limit. This is a limit where
+ hardware performs emergency shutdown. Zero can be passed to disable
+ protection and value '1' indicates that protection should be enabled but
+ limit setting can be omitted.
+
+ regulator-temp-error-kelvin:
+ description: Set over temperature error limit. This is a limit where part of
+ the hardware probably is malfunctioning and damage prevention is requested.
+ Zero can be passed to disable error detection and value '1' indicates
+ that detection should be enabled but limit setting can be omitted.
+
+ regulator-temp-warn-kelvin:
+ description: Set over temperature warning limit. This is a limit where
+ hardware is assumed still to be functional but approaching limit where it
+ gets damaged. Recovery actions should be initiated. Zero can be passed to
+ disable detection and value '1' indicates that detection should
+ be enabled but limit setting can be omitted.
+
regulator-active-discharge:
description: |
tristate, enable/disable active discharge of regulators. The values are:
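To make the 0/1 convention concrete, a minimal sketch of a regulator node (names and limits are invented for illustration):

    vdd_core: regulator-vdd-core {
        regulator-name = "vdd-core";
        regulator-min-microvolt = <800000>;
        regulator-max-microvolt = <1200000>;
        /* hard shutdown if current exceeds 4 A */
        regulator-oc-protection-microamp = <4000000>;
        /* enable over-voltage error detection, keep the hardware default limit */
        regulator-ov-error-microvolt = <1>;
        /* explicitly disable under-voltage warning detection */
        regulator-uv-warn-microvolt = <0>;
    };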
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml
new file mode 100644
index 000000000000..0534b0d68359
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt6160-regulator.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rt6160-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT6160 BuckBoost converter
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ The RT6160 is a high-efficiency buck-boost converter that can provide
+ up to 3A of output current from 2025mV to 5200mV. It supports a wide
+ input voltage range from 2200mV to 5500mV.
+
+ Datasheet is available at
+ https://www.richtek.com/assets/product_file/RT6160A/DS6160A-00.pdf
+
+allOf:
+ - $ref: regulator.yaml#
+
+properties:
+ compatible:
+ enum:
+ - richtek,rt6160
+
+ reg:
+ maxItems: 1
+
+ enable-gpios:
+ description: A connection to the 'enable' gpio line.
+ maxItems: 1
+
+ richtek,vsel-active-low:
+ description: |
+ Used to indicate the 'vsel' pin active level. If not specified, the
+ active level defaults to high.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rt6160@75 {
+ compatible = "richtek,rt6160";
+ reg = <0x75>;
+ enable-gpios = <&gpio26 2 0>;
+ regulator-name = "rt6160-buckboost";
+ regulator-min-microvolt = <2025000>;
+ regulator-max-microvolt = <5200000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml
new file mode 100644
index 000000000000..796ceac87445
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt6245-regulator.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rt6245-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT6245 High Current Voltage Regulator
+
+maintainers:
+ - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+ The RT6245 is a high-performance, synchronous step-down converter
+ that can deliver up to 14A output current with an input supply
+ voltage range of 4.5V to 17V.
+
+allOf:
+ - $ref: regulator.yaml#
+
+properties:
+ compatible:
+ enum:
+ - richtek,rt6245
+
+ reg:
+ maxItems: 1
+
+ enable-gpios:
+ description: |
+ A connection to the chip 'enable' gpio line. If not provided,
+ it will be treated as a default-on supply.
+ maxItems: 1
+
+ richtek,oc-level-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
+ description: |
+ Over current level selection. Each respective value means the current
+ limit 8A, 14A, 12A, 10A. If this property is missing, the chip default
+ is kept.
+
+ richtek,ot-level-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2]
+ description: |
+ Over temperature level selection. Each respective value means the
+ threshold 150°C, 130°C, 170°C. If this property is missing, the chip
+ default is kept.
+
+ richtek,pgdly-time-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
+ description: |
+ Power good signal delay time selection. Each respective value means the
+ delay time 0us, 10us, 20us, 40us. If this property is missing, the chip
+ default is kept.
+
+ richtek,switch-freq-select:
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2]
+ description: |
+ Buck switch frequency selection. Each respective value means 400kHz,
+ 800kHz, 1200kHz. If this property is missing, the chip default is kept.
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rt6245@34 {
+ compatible = "richtek,rt6245";
+ status = "okay";
+ reg = <0x34>;
+ enable-gpios = <&gpio26 2 0>;
+
+ regulator-name = "rt6245-regulator";
+ regulator-min-microvolt = <437500>;
+ regulator-max-microvolt = <1387500>;
+ regulator-boot-on;
+ };
+ };
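The example above leaves the optional tuning properties at their chip defaults; a hedged variant with values picked arbitrarily from the documented enums (note the /bits/ 8 cast for the uint8 properties):

    rt6245@34 {
        compatible = "richtek,rt6245";
        reg = <0x34>;
        regulator-name = "rt6245-regulator";
        regulator-min-microvolt = <437500>;
        regulator-max-microvolt = <1387500>;
        /* value 2 selects the 12A over-current limit */
        richtek,oc-level-select = /bits/ 8 <2>;
        /* value 1 selects the 800kHz switching frequency */
        richtek,switch-freq-select = /bits/ 8 <1>;
    };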
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
index b6515a0cee62..7cb74cc8c5d9 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd9576-regulator.yaml
@@ -27,6 +27,12 @@ patternProperties:
Properties for single regulator.
$ref: "regulator.yaml#"
+ properties:
+ rohm,ocw-fet-ron-micro-ohms:
+ description: |
+ External FET's ON-resistance. Required if VoutS1 OCP/OCW is
+ to be set.
+
required:
- regulator-name
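For illustration, a sketch of how a VoutS1 regulator node might combine the new property with the generic limits (node name and values are made up):

    regulators {
        regulator-vouts1 {
            regulator-name = "vouts1";
            /* ON-resistance of the external sense FET, 15 mOhm */
            rohm,ocw-fet-ron-micro-ohms = <15000>;
            regulator-oc-warn-microamp = <2000000>;
        };
    };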
diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
index db61f0731a20..2e35aeaa8781 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
@@ -57,7 +57,7 @@ patternProperties:
rate
sound-dai:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: phandle of the CPU DAI
patternProperties:
@@ -71,7 +71,7 @@ patternProperties:
properties:
sound-dai:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: phandle of the codec DAI
required:
diff --git a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
index b4c190bddd84..61802a11baf4 100644
--- a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
@@ -49,7 +49,7 @@ properties:
maxItems: 1
memory-region:
- $ref: /schemas/types.yaml#/definitions/phandle
+ maxItems: 1
description:
phandle to a node describing reserved memory (System RAM memory)
The M core can't access all the DDR memory space on some platform,
diff --git a/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt b/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt
deleted file mode 100644
index fb1a6728638d..000000000000
--- a/Documentation/devicetree/bindings/spi/renesas,rzn1-spi.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Renesas RZ/N1 SPI Controller
-
-This controller is based on the Synopsys DW Synchronous Serial Interface and
-inherits all properties defined in snps,dw-apb-ssi.txt except for the
-compatible property.
-
-Required properties:
-- compatible : The device specific string followed by the generic RZ/N1 string.
- Therefore it must be one of:
- "renesas,r9a06g032-spi", "renesas,rzn1-spi"
- "renesas,r9a06g033-spi", "renesas,rzn1-spi"
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
index 4825157cd92e..ca91201a9926 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
@@ -67,6 +67,12 @@ properties:
const: baikal,bt1-sys-ssi
- description: Canaan Kendryte K210 SoS SPI Controller
const: canaan,k210-spi
+ - description: Renesas RZ/N1 SPI Controller
+ items:
+ - enum:
+ - renesas,r9a06g032-spi # RZ/N1D
+ - renesas,r9a06g033-spi # RZ/N1S
+ - const: renesas,rzn1-spi # RZ/N1
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.txt b/Documentation/devicetree/bindings/spi/spi-cadence.txt
deleted file mode 100644
index 05a2ef945664..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-cadence.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Cadence SPI controller Device Tree Bindings
--------------------------------------------
-
-Required properties:
-- compatible : Should be "cdns,spi-r1p6" or "xlnx,zynq-spi-r1p6".
-- reg : Physical base address and size of SPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-- clock-names : List of input clock names - "ref_clk", "pclk"
- (See clock bindings for details).
-- clocks : Clock phandles (see clock bindings for details).
-
-Optional properties:
-- num-cs : Number of chip selects used.
- If a decoder is used, this will be the number of
- chip selects after the decoder.
-- is-decoded-cs : Flag to indicate whether decoder is used or not.
-
-Example:
-
- spi@e0007000 {
- compatible = "xlnx,zynq-spi-r1p6";
- clock-names = "ref_clk", "pclk";
- clocks = <&clkc 26>, <&clkc 35>;
- interrupt-parent = <&intc>;
- interrupts = <0 49 4>;
- num-cs = <4>;
- is-decoded-cs = <0>;
- reg = <0xe0007000 0x1000>;
- } ;
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
new file mode 100644
index 000000000000..9787be21318e
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-cadence.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence SPI controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - cdns,spi-r1p6
+ - xlnx,zynq-spi-r1p6
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: pclk
+
+ clocks:
+ maxItems: 2
+
+ num-cs:
+ description: |
+ Number of chip selects used. If a decoder is used,
+ this will be the number of chip selects after the
+ decoder.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4
+ default: 4
+
+ is-decoded-cs:
+ description: |
+ Flag to indicate whether decoder is used or not.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ default: 0
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi@e0007000 {
+ compatible = "xlnx,zynq-spi-r1p6";
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 26>, <&clkc 35>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 49 4>;
+ num-cs = <4>;
+ is-decoded-cs = <0>;
+ reg = <0xe0007000 0x1000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index 0477396e4945..faef4f6f55b8 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -114,8 +114,11 @@ patternProperties:
Compatible of the SPI device.
reg:
- minimum: 0
- maximum: 256
+ minItems: 1
+ maxItems: 256
+ items:
+ minimum: 0
+ maximum: 256
description:
Chip select used by the device.
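For illustration, the relaxed schema lets a peripheral wired to several chip-select lines list them all in reg (device and frequency are placeholders):

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        flash@0 {
            compatible = "jedec,spi-nor";
            /* reachable on chip selects 0 and 1 */
            reg = <0>, <1>;
            spi-max-frequency = <25000000>;
        };
    };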
diff --git a/Documentation/devicetree/bindings/spi/spi-mux.yaml b/Documentation/devicetree/bindings/spi/spi-mux.yaml
index d09c6355e22d..51c7622dc20b 100644
--- a/Documentation/devicetree/bindings/spi/spi-mux.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-mux.yaml
@@ -72,7 +72,7 @@ examples:
mux-controls = <&mux>;
- spi-flash@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 1e6cf29e6388..7f987e79337c 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -33,6 +33,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rv1126-spi
- const: rockchip,rk3066-spi
reg:
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
deleted file mode 100644
index 5f4ed3e5c994..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Xilinx SPI controller Device Tree Bindings
--------------------------------------------------
-
-Required properties:
-- compatible : Should be "xlnx,xps-spi-2.00.a", "xlnx,xps-spi-2.00.b" or "xlnx,axi-quad-spi-1.00.a"
-- reg : Physical base address and size of SPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-
-Optional properties:
-- xlnx,num-ss-bits : Number of chip selects used.
-- xlnx,num-transfer-bits : Number of bits per transfer. This will be 8 if not specified
-
-Example:
- axi_quad_spi@41e00000 {
- compatible = "xlnx,xps-spi-2.00.a";
- interrupt-parent = <&intc>;
- interrupts = <0 31 1>;
- reg = <0x41e00000 0x10000>;
- xlnx,num-ss-bits = <0x1>;
- xlnx,num-transfer-bits = <32>;
- };
-
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.yaml b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
new file mode 100644
index 000000000000..593f7693bace
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-xilinx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx SPI controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - xlnx,xps-spi-2.00.a
+ - xlnx,xps-spi-2.00.b
+ - xlnx,axi-quad-spi-1.00.a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ xlnx,num-ss-bits:
+ description: Number of chip selects used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+
+ xlnx,num-transfer-bits:
+ description: Number of bits per transfer. This will be 8 if not specified.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16, 32]
+ default: 8
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi0: spi@41e00000 {
+ compatible = "xlnx,xps-spi-2.00.a";
+ interrupt-parent = <&intc>;
+ interrupts = <0 31 1>;
+ reg = <0x41e00000 0x10000>;
+ xlnx,num-ss-bits = <0x1>;
+ xlnx,num-transfer-bits = <32>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
deleted file mode 100644
index 0f6d37ff541c..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
--------------------------------------------------------------------
-
-Required properties:
-- compatible : Should be "xlnx,zynqmp-qspi-1.0".
-- reg : Physical base address and size of GQSPI registers map.
-- interrupts : Property with a value describing the interrupt
- number.
-- clock-names : List of input clock names - "ref_clk", "pclk"
- (See clock bindings for details).
-- clocks : Clock phandles (see clock bindings for details).
-
-Optional properties:
-- num-cs : Number of chip selects used.
-
-Example:
- qspi: spi@ff0f0000 {
- compatible = "xlnx,zynqmp-qspi-1.0";
- clock-names = "ref_clk", "pclk";
- clocks = <&misc_clk &misc_clk>;
- interrupts = <0 15 4>;
- interrupt-parent = <&gic>;
- num-cs = <1>;
- reg = <0x0 0xff0f0000 0x1000>,<0x0 0xc0000000 0x8000000>;
- };
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
new file mode 100644
index 000000000000..ea72c8001256
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-zynqmp-qspi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ const: xlnx,zynqmp-qspi-1.0
+
+ reg:
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: pclk
+
+ clocks:
+ maxItems: 2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ qspi: spi@ff0f0000 {
+ compatible = "xlnx,zynqmp-qspi-1.0";
+ clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+ clock-names = "ref_clk", "pclk";
+ interrupts = <0 15 4>;
+ interrupt-parent = <&gic>;
+ reg = <0x0 0xff0f0000 0x0 0x1000>,
+ <0x0 0xc0000000 0x0 0x8000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 8341e9d23c1e..37ac0a3ae3b4 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -73,6 +73,8 @@ properties:
- dallas,ds4510
# Digital Thermometer and Thermostat
- dallas,ds75
+ # Delta Electronics DPS920AB 920W 54V Power Supply
+ - delta,dps920ab
# 1/4 Brick DC/DC Regulated Power Module
- delta,q54sj108a2
# Devantech SRF02 ultrasonic ranger in I2C mode
@@ -103,6 +105,8 @@ properties:
- fsl,mpl3115
# MPR121: Proximity Capacitive Touch Sensor Controller
- fsl,mpr121
+ # Monolithic Power Systems Inc. multi-phase controller mp2888
+ - mps,mp2888
# Monolithic Power Systems Inc. multi-phase controller mp2975
- mps,mp2975
# G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
diff --git a/Documentation/driver-api/nvdimm/nvdimm.rst b/Documentation/driver-api/nvdimm/nvdimm.rst
index ef6d59e0978e..1d8302b89bd4 100644
--- a/Documentation/driver-api/nvdimm/nvdimm.rst
+++ b/Documentation/driver-api/nvdimm/nvdimm.rst
@@ -4,7 +4,7 @@ LIBNVDIMM: Non-Volatile Devices
libnvdimm - kernel / libndctl - userspace helper library
-linux-nvdimm@lists.01.org
+nvdimm@lists.linux.dev
Version 13
diff --git a/Documentation/driver-api/serial/index.rst b/Documentation/driver-api/serial/index.rst
index 21351b8c95a4..8f7d7af3b90b 100644
--- a/Documentation/driver-api/serial/index.rst
+++ b/Documentation/driver-api/serial/index.rst
@@ -19,7 +19,6 @@ Serial drivers
moxa-smartio
n_gsm
- rocket
serial-iso7816
serial-rs485
diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst
index 4b638c14bc16..c93fa5e961a0 100644
--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -740,21 +740,15 @@ possible.
5. thermal_emergency_poweroff
=============================
-On an event of critical trip temperature crossing. Thermal framework
-allows the system to shutdown gracefully by calling orderly_poweroff().
-In the event of a failure of orderly_poweroff() to shut down the system
-we are in danger of keeping the system alive at undesirably high
-temperatures. To mitigate this high risk scenario we program a work
-queue to fire after a pre-determined number of seconds to start
-an emergency shutdown of the device using the kernel_power_off()
-function. In case kernel_power_off() fails then finally
-emergency_restart() is called in the worst case.
+When a critical trip temperature is crossed, the thermal framework shuts
+down the system by calling hw_protection_shutdown(). The
+hw_protection_shutdown() function first attempts to perform an orderly
+shutdown but accepts a delay after which it proceeds with a forced
+power-off or, as a last resort, an emergency_restart().
The delay should be carefully profiled so as to give adequate time for
-orderly_poweroff(). In case of failure of an orderly_poweroff() the
-emergency poweroff kicks in after the delay has elapsed and shuts down
-the system.
+orderly poweroff.
-If set to 0 emergency poweroff will not be supported. So a carefully
-profiled non-zero positive value is a must for emergency poweroff to be
-triggered.
+If the delay is set to 0, emergency poweroff will not be supported. So a
+carefully profiled non-zero positive value is a must for emergency
+poweroff to be triggered.
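+
+As a minimal illustrative sketch only (the prototype lives in
+include/linux/reboot.h, and the reason string and delay value below are
+assumptions rather than recommendations), a thermal driver reacting to a
+critical trip could invoke the helper like this::
+
+    #include <linux/reboot.h>
+
+    /* Allow up to 10 seconds for an orderly shutdown before forcing it. */
+    hw_protection_shutdown("Critical temperature reached", 10000);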
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index 543e70434da2..2c94ff2f4385 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -109,16 +109,21 @@ well as to make sure they aren't relying on some HCD-specific behavior.
USB-Standard Types
==================
-In ``drivers/usb/common/common.c`` and ``drivers/usb/common/debug.c`` you
-will find the USB data types defined in chapter 9 of the USB specification.
-These data types are used throughout USB, and in APIs including this host
-side API, gadget APIs, usb character devices and debugfs interfaces.
+In ``include/uapi/linux/usb/ch9.h`` you will find the USB data types defined
+in chapter 9 of the USB specification. These data types are used throughout
+USB, and in APIs including this host side API, gadget APIs, usb character
+devices and debugfs interfaces. That file is itself included by
+``include/linux/usb/ch9.h``, which also contains declarations of a few
+utility routines for manipulating these data types; the implementations
+are in ``drivers/usb/common/common.c``.
.. kernel-doc:: drivers/usb/common/common.c
:export:
-.. kernel-doc:: drivers/usb/common/debug.c
- :export:
+In addition, some functions useful for creating debugging output are
+defined in ``drivers/usb/common/debug.c``.
+
+.. _usb_header:
Host-Side Data Types and Macros
===============================
diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst
index bf145171c2bf..832839fcf4c3 100644
--- a/Documentation/filesystems/erofs.rst
+++ b/Documentation/filesystems/erofs.rst
@@ -50,8 +50,8 @@ Here is the main features of EROFS:
- Support POSIX.1e ACLs by using xattrs;
- - Support transparent file compression as an option:
- LZ4 algorithm with 4 KB fixed-sized output compression for high performance.
+ - Support transparent data compression as an option:
+ LZ4 algorithm with fixed-sized output compression for high performance.
The following git tree provides the file system user-space tools under
development (ex, formatting tool mkfs.erofs):
@@ -113,31 +113,31 @@ may not. All metadatas can be now observed in two different spaces (views):
::
- |-> aligned with 8B
- |-> followed closely
- + meta_blkaddr blocks |-> another slot
- _____________________________________________________________________
- | ... | inode | xattrs | extents | data inline | ... | inode ...
- |________|_______|(optional)|(optional)|__(optional)_|_____|__________
- |-> aligned with the inode slot size
- . .
- . .
- . .
- . .
- . .
- . .
- .____________________________________________________|-> aligned with 4B
- | xattr_ibody_header | shared xattrs | inline xattrs |
- |____________________|_______________|_______________|
- |-> 12 bytes <-|->x * 4 bytes<-| .
- . . .
- . . .
- . . .
- ._______________________________.______________________.
- | id | id | id | id | ... | id | ent | ... | ent| ... |
- |____|____|____|____|______|____|_____|_____|____|_____|
- |-> aligned with 4B
- |-> aligned with 4B
+ |-> aligned with 8B
+ |-> followed closely
+ + meta_blkaddr blocks |-> another slot
+ _____________________________________________________________________
+ | ... | inode | xattrs | extents | data inline | ... | inode ...
+ |________|_______|(optional)|(optional)|__(optional)_|_____|__________
+ |-> aligned with the inode slot size
+ . .
+ . .
+ . .
+ . .
+ . .
+ . .
+ .____________________________________________________|-> aligned with 4B
+ | xattr_ibody_header | shared xattrs | inline xattrs |
+ |____________________|_______________|_______________|
+ |-> 12 bytes <-|->x * 4 bytes<-| .
+ . . .
+ . . .
+ . . .
+ ._______________________________.______________________.
+ | id | id | id | id | ... | id | ent | ... | ent| ... |
+ |____|____|____|____|______|____|_____|_____|____|_____|
+ |-> aligned with 4B
+ |-> aligned with 4B
Inode could be 32 or 64 bytes, which can be distinguished from a common
field which all inode versions have -- i_format::
@@ -175,13 +175,13 @@ may not. All metadatas can be now observed in two different spaces (views):
Each share xattr can also be directly found by the following formula:
xattr offset = xattr_blkaddr * block_size + 4 * xattr_id
- ::
+::
- |-> aligned by 4 bytes
- + xattr_blkaddr blocks |-> aligned with 4 bytes
- _________________________________________________________________________
- | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
- |________|_____________|_____________|_____|______________|_______________
+ |-> aligned by 4 bytes
+ + xattr_blkaddr blocks |-> aligned with 4 bytes
+ _________________________________________________________________________
+ | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
+ |________|_____________|_____________|_____|______________|_______________
Directories
-----------
@@ -193,48 +193,77 @@ algorithm (could refer to the related source code).
::
- ___________________________
- / |
- / ______________|________________
- / / | nameoff1 | nameoffN-1
- ____________.______________._______________v________________v__________
- | dirent | dirent | ... | dirent | filename | filename | ... | filename |
- |___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
- \ ^
- \ | * could have
- \ | trailing '\0'
- \________________________| nameoff0
-
- Directory block
+ ___________________________
+ / |
+ / ______________|________________
+ / / | nameoff1 | nameoffN-1
+ ____________.______________._______________v________________v__________
+ | dirent | dirent | ... | dirent | filename | filename | ... | filename |
+ |___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
+ \ ^
+ \ | * could have
+ \ | trailing '\0'
+ \________________________| nameoff0
+ Directory block
Note that apart from the offset of the first filename, nameoff0 also indicates
the total number of directory entries in this block since it is no need to
introduce another on-disk field at all.
-Compression
------------
-Currently, EROFS supports 4KB fixed-sized output transparent file compression,
-as illustrated below::
-
- |---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
- clusterofs clusterofs clusterofs
- | | | logical data
- _________v_______________________________v_____________________v_______________
- ... | . | | . | | . | ...
- ____|____.________|_____________|________.____|_____________|__.__________|____
- |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|
- size size size size size
- . . . .
- . . . .
- . . . .
- _______._____________._____________._____________._____________________
- ... | | | | ... physical data
- _______|_____________|_____________|_____________|_____________________
- |-> cluster <-|-> cluster <-|-> cluster <-|
- size size size
-
-Currently each on-disk physical cluster can contain 4KB (un)compressed data
-at most. For each logical cluster, there is a corresponding on-disk index to
-describe its cluster type, physical cluster address, etc.
-
-See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
+Data compression
+----------------
+EROFS implements LZ4 fixed-sized output compression, which generates fixed-sized
+compressed data blocks from variable-sized input, in contrast to other existing
+fixed-sized input solutions. Relatively higher compression ratios can be achieved
+with fixed-sized output compression since popular data compression algorithms
+are mostly LZ77-based and such a fixed-sized output approach can benefit from
+the historical dictionary (a.k.a. the sliding window).
+
+In detail, the original (uncompressed) data is turned into several variable-sized
+extents and, in the meanwhile, compressed into physical clusters (pclusters).
+In order to record each variable-sized extent, logical clusters (lclusters) are
+introduced as the basic unit of compress indexes to indicate whether a new
+extent is generated within the range (HEAD) or not (NONHEAD). Lclusters are now
+fixed in block size, as illustrated below::
+
+ |<- variable-sized extent ->|<- VLE ->|
+ clusterofs clusterofs clusterofs
+ | | |
+ _________v_________________________________v_______________________v________
+ ... | . | | . | | . ...
+ ____|____._________|______________|________.___ _|______________|__.________
+ |-> lcluster <-|-> lcluster <-|-> lcluster <-|-> lcluster <-|
+ (HEAD) (NONHEAD) (HEAD) (NONHEAD) .
+ . CBLKCNT . .
+ . . .
+ . . .
+ _______._____________________________.______________._________________
+ ... | | | | ...
+ _______|______________|______________|______________|_________________
+ |-> big pcluster <-|-> pcluster <-|
+
+A physical cluster can be seen as a container of physical compressed blocks
+which contains compressed data. Previously, only lcluster-sized (4KB) pclusters
+were supported. After the big pcluster feature was introduced (available since
+Linux v5.13), a pcluster can be a multiple of the lcluster size.
+
+For each HEAD lcluster, clusterofs is recorded to indicate where a new extent
+starts and blkaddr is used to seek the compressed data. For each NONHEAD
+lcluster, delta0 and delta1 are available instead of blkaddr to indicate the
+distance to its HEAD lcluster and the next HEAD lcluster. A PLAIN lcluster is
+also a HEAD lcluster except that its data is uncompressed. See the comments
+around "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
+
+If big pcluster is enabled, the pcluster size in lclusters needs to be recorded
+as well. The delta0 of the first NONHEAD lcluster stores the compressed block
+count together with a special flag, turning it into a so-called CBLKCNT NONHEAD
+lcluster. Since it is the first NONHEAD lcluster after the HEAD, its delta0 is
+constantly 1, as illustrated below::
+
+ __________________________________________________________
+ | HEAD | NONHEAD | NONHEAD | ... | NONHEAD | HEAD | HEAD |
+ |__:___|_(CBLKCNT)_|_________|_____|_________|__:___|____:_|
+ |<----- a big pcluster (with CBLKCNT) ------>|<-- -->|
+ a lcluster-sized pcluster (without CBLKCNT) ^
+
+If another HEAD follows a HEAD lcluster, there is no room to record CBLKCNT,
+but it's easy to know that the size of such a pcluster is 1 lcluster as well.
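+
+For reference, a simplified sketch of the per-lcluster on-disk index is shown
+below; the authoritative definition (including the exact field names and the
+CBLKCNT encoding) is the one in erofs_fs.h, so treat this only as an
+illustration::
+
+    struct z_erofs_vle_decompressed_index {
+        __le16 di_advise;       /* lcluster type: HEAD / NONHEAD / PLAIN */
+        __le16 di_clusterofs;   /* clusterofs for HEAD lclusters */
+        union {
+            __le32 blkaddr;     /* HEAD: start of the compressed data */
+            __le16 delta[2];    /* NONHEAD: delta0/delta1 to HEAD lclusters */
+        } di_u;
+    };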
diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst
index 471be1e98d6f..1c85a2af92bf 100644
--- a/Documentation/hwmon/adm1177.rst
+++ b/Documentation/hwmon/adm1177.rst
@@ -20,7 +20,8 @@ Usage Notes
-----------
This driver does not auto-detect devices. You will have to instantiate the
-devices explicitly. Please see :doc:`/i2c/instantiating-devices` for details.
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst
+for details.
Sysfs entries
diff --git a/Documentation/hwmon/dps920ab.rst b/Documentation/hwmon/dps920ab.rst
new file mode 100644
index 000000000000..c33b4cdc0a60
--- /dev/null
+++ b/Documentation/hwmon/dps920ab.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver dps920ab
+========================
+
+Supported chips:
+
+ * Delta DPS920AB
+
+ Prefix: 'dps920ab'
+
+ Addresses scanned: -
+
+Authors:
+ Robert Marko <robert.marko@sartura.hr>
+
+
+Description
+-----------
+
+This driver implements support for the Delta DPS920AB 920W 54V DC single output
+power supply with PMBus support.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Sysfs entries
+-------------
+
+======================= ======================================================
+curr1_label "iin"
+curr1_input Measured input current
+curr1_alarm Input current high alarm
+
+curr2_label "iout1"
+curr2_input Measured output current
+curr2_max Maximum output current
+curr2_rated_max Maximum rated output current
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_alarm Input voltage alarm
+
+in2_label "vout1"
+in2_input Measured output voltage
+in2_rated_min Minimum rated output voltage
+in2_rated_max Maximum rated output voltage
+in2_alarm Output voltage alarm
+
+power1_label "pin"
+power1_input Measured input power
+power1_alarm Input power high alarm
+
+power2_label "pout1"
+power2_input Measured output power
+power2_rated_max Maximum rated output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_alarm Temperature alarm
+
+fan1_alarm Fan 1 warning.
+fan1_fault Fan 1 fault.
+fan1_input Fan 1 speed in RPM.
+======================= ======================================================
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 9ed60fa84cbe..bc01601ea81a 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -53,6 +53,7 @@ Hardware Monitoring Kernel Drivers
da9055
dell-smm-hwmon
dme1737
+ dps920ab
drivetemp
ds1621
ds620
@@ -137,6 +138,7 @@ Hardware Monitoring Kernel Drivers
mcp3021
menf21bmc
mlxreg-fan
+ mp2888
mp2975
nct6683
nct6775
@@ -150,6 +152,7 @@ Hardware Monitoring Kernel Drivers
pc87360
pc87427
pcf8591
+ pim4328
pm6764tr
pmbus
powr1220
@@ -164,6 +167,7 @@ Hardware Monitoring Kernel Drivers
sht15
sht21
sht3x
+ sht4x
shtc1
sis5595
sl28cpld
diff --git a/Documentation/hwmon/ir36021.rst b/Documentation/hwmon/ir36021.rst
index ca3436b04e20..1faa85c39f1b 100644
--- a/Documentation/hwmon/ir36021.rst
+++ b/Documentation/hwmon/ir36021.rst
@@ -19,7 +19,7 @@ Authors:
Description
-----------
-The IR36021 is a dual‐loop digital multi‐phase buck controller designed for
+The IR36021 is a dual-loop digital multi-phase buck controller designed for
point of load applications.
Usage Notes
diff --git a/Documentation/hwmon/lm75.rst b/Documentation/hwmon/lm75.rst
index 81257d5fc48f..8d0ab4ad5fb5 100644
--- a/Documentation/hwmon/lm75.rst
+++ b/Documentation/hwmon/lm75.rst
@@ -93,9 +93,9 @@ Supported chips:
https://www.st.com/resource/en/datasheet/stlm75.pdf
- * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75B, TMP75C, TMP175, TMP275
+ * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75B, TMP75C, TMP175, TMP275, TMP1075
- Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75b', 'tmp75c', 'tmp275'
+ Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75b', 'tmp75c', 'tmp275', 'tmp1075'
Addresses scanned: none
@@ -119,6 +119,8 @@ Supported chips:
https://www.ti.com/product/tmp275
+ https://www.ti.com/product/TMP1075
+
* NXP LM75B, PCT2075
Prefix: 'lm75b', 'pct2075'
diff --git a/Documentation/hwmon/ltc2992.rst b/Documentation/hwmon/ltc2992.rst
index 46aa1aa84a1a..a0bcd867a0f5 100644
--- a/Documentation/hwmon/ltc2992.rst
+++ b/Documentation/hwmon/ltc2992.rst
@@ -19,7 +19,7 @@ This driver supports hardware monitoring for Linear Technology LTC2992 power mon
LTC2992 is a rail-to-rail system monitor that measures current,
voltage, and power of two supplies.
-Two ADCs simultaneously measure each supply’s current. A third ADC monitors
+Two ADCs simultaneously measure each supply's current. A third ADC monitors
the input voltages and four auxiliary external voltages.
diff --git a/Documentation/hwmon/max31790.rst b/Documentation/hwmon/max31790.rst
index f301385d8cef..7b097c3b9b90 100644
--- a/Documentation/hwmon/max31790.rst
+++ b/Documentation/hwmon/max31790.rst
@@ -38,6 +38,7 @@ Sysfs entries
fan[1-12]_input RO fan tachometer speed in RPM
fan[1-12]_fault RO fan experienced fault
fan[1-6]_target RW desired fan speed in RPM
-pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
-pwm[1-6] RW fan target duty cycle (0-255)
+pwm[1-6]_enable RW regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
+pwm[1-6] RW read: current pwm duty cycle,
+ write: target pwm duty cycle (0-255)
================== === =======================================================
diff --git a/Documentation/hwmon/mp2888.rst b/Documentation/hwmon/mp2888.rst
new file mode 100644
index 000000000000..5e578fd7b147
--- /dev/null
+++ b/Documentation/hwmon/mp2888.rst
@@ -0,0 +1,113 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver mp2888
+====================
+
+Supported chips:
+
+ * MPS MP2888
+
+ Prefix: 'mp2888'
+
+Author:
+
+ Vadim Pasternak <vadimp@nvidia.com>
+
+Description
+-----------
+
+This driver implements support for the Monolithic Power Systems, Inc. (MPS)
+dual-loop, digital, multi-phase controller MP2888.
+
+This device supports:
+
+- One power rail.
+- Programmable multi-phase operation up to 10 phases.
+- PWM-VID interface.
+- One page (page 0) for telemetry.
+- Programmable pins for the PMBus address.
+- Built-in EEPROM to store custom configurations.
+
+The device is compliant with:
+
+- PMBus rev 1.3 interface.
+
+The device supports the direct format for reading output current, output
+voltage, input and output power, and temperature.
+It supports the linear format for reading input voltage and input power.
+
+The driver provides the following attributes for the current:
+
+- for output current: input and maximum alarm;
+- for phase current: input and label.
+
+The driver exports the following attributes via the 'sysfs' files, where:
+
+- 'n' is the number of configured phases (from 1 to 10);
+- index 1 for "iout";
+- indexes 2 ... 1 + n for phases.
+
+**curr[1-{1+n}]_input**
+
+**curr[1-{1+n}]_label**
+
+**curr1_max**
+
+**curr1_max_alarm**
+
+The driver provides the following attributes for the voltage:
+
+- for voltage in: input, low and high critical thresholds, low and high
+  critical alarms;
+- for voltage out: input and high alarm.
+
+The driver exports the following attributes via the 'sysfs' files:
+
+**in1_crit**
+
+**in1_crit_alarm**
+
+**in1_input**
+
+**in1_label**
+
+**in1_min**
+
+**in1_min_alarm**
+
+**in2_alarm**
+
+**in2_input**
+
+**in2_label**
+
+The driver provides the following attributes for the power:
+
+- for power in: alarm and input;
+- for power out: cap, cap alarm and input.
+
+The driver exports the following attributes via the 'sysfs' files, where:
+
+- index 1 is for "pin";
+- index 2 is for "pout".
+
+**power1_alarm**
+
+**power1_input**
+
+**power1_label**
+
+**power2_input**
+
+**power2_label**
+
+**power2_max**
+
+**power2_max_alarm**
+
+The driver provides the following attributes for the temperature:
+
+**temp1_input**
+
+**temp1_max**
+
+**temp1_max_alarm**
diff --git a/Documentation/hwmon/pim4328.rst b/Documentation/hwmon/pim4328.rst
new file mode 100644
index 000000000000..70c9e7a6882c
--- /dev/null
+++ b/Documentation/hwmon/pim4328.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver pim4328
+=====================
+
+Supported chips:
+
+ * Flex PIM4328
+
+ Prefix: 'pim4328', 'bmr455'
+
+ Addresses scanned: -
+
+ Datasheet:
+
+https://flexpowermodules.com/resources/fpm-techspec-pim4328
+
+ * Flex PIM4820
+
+ Prefixes: 'pim4820'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-pim4820
+
+ * Flex PIM4006, PIM4106, PIM4206, PIM4306, PIM4406
+
+ Prefixes: 'pim4006', 'pim4106', 'pim4206', 'pim4306', 'pim4406'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-pim4006
+
+Author: Erik Rosen <erik.rosen@metormote.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Flex PIM4328 and
+compatible digital power interface modules.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus.rst and Documentation/hwmon/pmbus-core.rst for details
+on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. All attributes are read-only.
+
+======================= ========================================================
+in1_label "vin"
+in1_input Measured input voltage.
+in1_alarm Input voltage alarm.
+
+in2_label "vin.0"
+in2_input Measured input voltage on input A.
+
+ PIM4328 and PIM4X06
+
+in3_label "vin.1"
+in3_input Measured input voltage on input B.
+
+ PIM4328 and PIM4X06
+
+in4_label "vcap"
+in4_input Measured voltage on holdup capacitor.
+
+ PIM4328
+
+curr1_label "iin.0"
+curr1_input Measured input current on input A.
+
+ PIM4X06
+
+curr2_label "iin.1"
+curr2_input Measured input current on input B.
+
+ PIM4X06
+
+currX_label "iout1"
+currX_input Measured output current.
+currX_alarm Output current alarm.
+
+ X is 1 for PIM4820, 3 otherwise.
+
+temp1_input Measured temperature.
+temp1_alarm High temperature alarm.
+======================= ========================================================
diff --git a/Documentation/hwmon/pm6764tr.rst b/Documentation/hwmon/pm6764tr.rst
index a1fb8fea2326..294a8ffc8bd8 100644
--- a/Documentation/hwmon/pm6764tr.rst
+++ b/Documentation/hwmon/pm6764tr.rst
@@ -20,7 +20,7 @@ Description:
------------
This driver supports the STMicroelectronics PM6764TR chip. The PM6764TR is a high
-performance digital controller designed to power Intel’s VR12.5 processors and memories.
+performance digital controller designed to power Intel's VR12.5 processors and memories.
The device utilizes digital technology to implement all control and power management
functions to provide maximum flexibility and performance. The NVM is embedded to store
diff --git a/Documentation/hwmon/pmbus-core.rst b/Documentation/hwmon/pmbus-core.rst
index 73e23ab42cc3..e7e0c9ef10be 100644
--- a/Documentation/hwmon/pmbus-core.rst
+++ b/Documentation/hwmon/pmbus-core.rst
@@ -289,12 +289,22 @@ PMBus driver platform data
==========================
PMBus platform data is defined in include/linux/pmbus.h. Platform data
-currently only provides a flag field with a single bit used::
+currently provides a flags field with four bits used::
- #define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+ #define PMBUS_SKIP_STATUS_CHECK BIT(0)
+
+ #define PMBUS_WRITE_PROTECTED BIT(1)
+
+ #define PMBUS_NO_CAPABILITY BIT(2)
+
+ #define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
+
+ /* regulator support */
+ int num_regulators;
+ struct regulator_init_data *reg_init_data;
};
@@ -302,8 +312,9 @@ Flags
-----
PMBUS_SKIP_STATUS_CHECK
- During register detection, skip checking the status register for
- communication or command errors.
+
+During register detection, skip checking the status register for
+communication or command errors.
Some PMBus chips respond with valid data when trying to read an unsupported
register. For such chips, checking the status register is mandatory when
@@ -315,3 +326,26 @@ status register must be disabled.
Some i2c controllers do not support single-byte commands (write commands with
no data, i2c_smbus_write_byte()). With such controllers, clearing the status
register is impossible, and the PMBUS_SKIP_STATUS_CHECK flag must be set.
+
+PMBUS_WRITE_PROTECTED
+
+Set if the chip is write protected and write protection is not determined
+by the standard WRITE_PROTECT command.
+
+PMBUS_NO_CAPABILITY
+
+Some PMBus chips don't respond with valid data when reading the CAPABILITY
+register. For such chips, this flag should be set so that the PMBus core
+driver doesn't use CAPABILITY to determine its behavior.
+
+PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+
+Read the STATUS register after each failed register check.
+
+Some PMBus chips end up in an undefined state when trying to read an
+unsupported register. For such chips, it is necessary to reset the chip's
+PMBus controller to a known state after a failed register check. This can
+be done by reading a known register. By setting this flag, the
+driver will try to read the STATUS register after each failed
+register check. This read may fail, but it will put the chip into a
+known state.
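+
+As an illustrative sketch only (the device name, address and flag combination
+below are made-up examples, not recommendations for any particular chip), a
+board file could pass these flags together with the platform data when
+declaring an I2C device::
+
+    #include <linux/i2c.h>
+    #include <linux/pmbus.h>
+
+    static struct pmbus_platform_data example_pdata = {
+        .flags = PMBUS_SKIP_STATUS_CHECK | PMBUS_NO_CAPABILITY,
+    };
+
+    static struct i2c_board_info example_board_info __initdata = {
+        I2C_BOARD_INFO("pmbus", 0x10),
+        .platform_data = &example_pdata,
+    };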
diff --git a/Documentation/hwmon/pmbus.rst b/Documentation/hwmon/pmbus.rst
index c44f14115413..7ecfec6ca2db 100644
--- a/Documentation/hwmon/pmbus.rst
+++ b/Documentation/hwmon/pmbus.rst
@@ -3,15 +3,18 @@ Kernel driver pmbus
Supported chips:
- * Ericsson BMR453, BMR454
+ * Flex BMR310, BMR453, BMR454, BMR456, BMR457, BMR458, BMR480,
+ BMR490, BMR491, BMR492
- Prefixes: 'bmr453', 'bmr454'
+ Prefixes: 'bmr310', 'bmr453', 'bmr454', 'bmr456', 'bmr457', 'bmr458', 'bmr480',
+ 'bmr490', 'bmr491', 'bmr492'
Addresses scanned: -
- Datasheet:
+ Datasheets:
+
+ https://flexpowermodules.com/products
- http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
* ON Semiconductor ADP4000, NCP4200, NCP4208
diff --git a/Documentation/hwmon/sht4x.rst b/Documentation/hwmon/sht4x.rst
new file mode 100644
index 000000000000..3b37abcd4a46
--- /dev/null
+++ b/Documentation/hwmon/sht4x.rst
@@ -0,0 +1,45 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver sht4x
+===================
+
+Supported Chips:
+
+ * Sensirion SHT4X
+
+ Prefix: 'sht4x'
+
+ Addresses scanned: None
+
+ Datasheet:
+
+ English: https://www.sensirion.com/fileadmin/user_upload/customers/sensirion/Dokumente/2_Humidity_Sensors/Datasheets/Sensirion_Humidity_Sensors_SHT4x_Datasheet.pdf
+
+Author: Navin Sankar Velliangiri <navin@linumiz.com>
+
+
+Description
+-----------
+
+This driver implements support for the Sensirion SHT4x chip, a humidity
+and temperature sensor. Temperature is measured in degrees Celsius and
+relative humidity is expressed as a percentage. In the sysfs interface, all
+values are scaled by 1000, i.e. the value for 31.5 degrees Celsius is 31500
+(see the sketch after the sysfs table below).
+
+Usage Notes
+-----------
+
+The device communicates with the I2C protocol. Sensors can have the I2C
+address 0x44. See Documentation/i2c/instantiating-devices.rst for methods
+to instantiate the device.
+
+Sysfs entries
+-------------
+
+=============== ============================================
+temp1_input Measured temperature in millidegrees Celsius
+humidity1_input Measured humidity in %H
+update_interval The minimum interval for polling the sensor,
+ in milliseconds. Writable. Must be at least
+ 2000.
+=============== ============================================
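+
+As an illustrative sketch only (the hwmon index and device path below are
+assumptions and will differ per system), userspace can undo the scaling by
+dividing the raw sysfs value by 1000::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        long millideg;
+        FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
+
+        if (f && fscanf(f, "%ld", &millideg) == 1)
+            printf("%.3f degrees Celsius\n", millideg / 1000.0);
+        if (f)
+            fclose(f);
+        return 0;
+    }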
diff --git a/Documentation/hwmon/tmp103.rst b/Documentation/hwmon/tmp103.rst
index e195a7d14309..b3ef81475cf8 100644
--- a/Documentation/hwmon/tmp103.rst
+++ b/Documentation/hwmon/tmp103.rst
@@ -21,10 +21,10 @@ Description
The TMP103 is a digital output temperature sensor in a four-ball
wafer chip-scale package (WCSP). The TMP103 is capable of reading
temperatures to a resolution of 1°C. The TMP103 is specified for
-operation over a temperature range of –40°C to +125°C.
+operation over a temperature range of -40°C to +125°C.
Resolution: 8 Bits
-Accuracy: ±1°C Typ (–10°C to +100°C)
+Accuracy: ±1°C Typ (-10°C to +100°C)
The driver provides the common sysfs-interface for temperatures (see
Documentation/hwmon/sysfs-interface.rst under Temperatures).
diff --git a/Documentation/hwmon/zl6100.rst b/Documentation/hwmon/zl6100.rst
index 968aff10ce0a..d42ed9d3ac69 100644
--- a/Documentation/hwmon/zl6100.rst
+++ b/Documentation/hwmon/zl6100.rst
@@ -3,87 +3,103 @@ Kernel driver zl6100
Supported chips:
- * Intersil / Zilker Labs ZL2004
+ * Renesas / Intersil / Zilker Labs ZL2004
Prefix: 'zl2004'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2004-datasheet.pdf
- * Intersil / Zilker Labs ZL2005
+ * Renesas / Intersil / Zilker Labs ZL2005
Prefix: 'zl2005'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6848.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2005-datasheet.pdf
- * Intersil / Zilker Labs ZL2006
+ * Renesas / Intersil / Zilker Labs ZL2006
Prefix: 'zl2006'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6850.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2006-datasheet.pdf
- * Intersil / Zilker Labs ZL2008
+ * Renesas / Intersil / Zilker Labs ZL2008
Prefix: 'zl2008'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6859.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2008-datasheet.pdf
- * Intersil / Zilker Labs ZL2105
+ * Renesas / Intersil / Zilker Labs ZL2105
Prefix: 'zl2105'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6851.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2105-datasheet.pdf
- * Intersil / Zilker Labs ZL2106
+ * Renesas / Intersil / Zilker Labs ZL2106
Prefix: 'zl2106'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6852.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl2106-datasheet.pdf
- * Intersil / Zilker Labs ZL6100
+ * Renesas / Intersil / Zilker Labs ZL6100
Prefix: 'zl6100'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6876.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl6100-datasheet.pdf
- * Intersil / Zilker Labs ZL6105
+ * Renesas / Intersil / Zilker Labs ZL6105
Prefix: 'zl6105'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl6105-datasheet.pdf
- * Intersil / Zilker Labs ZL9101M
+ * Renesas / Intersil / Zilker Labs ZL8802
+
+ Prefix: 'zl8802'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl8802-datasheet
+
+ * Renesas / Intersil / Zilker Labs ZL9101M
Prefix: 'zl9101'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn7669.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl9101m-datasheet
- * Intersil / Zilker Labs ZL9117M
+ * Renesas / Intersil / Zilker Labs ZL9117M
Prefix: 'zl9117'
Addresses scanned: -
- Datasheet: http://www.intersil.com/data/fn/fn7914.pdf
+ Datasheet: https://www.renesas.com/us/en/document/dst/zl9117m-datasheet
+
+ * Renesas / Intersil / Zilker Labs ZLS1003, ZLS4009
+
+ Prefixes: 'zls1003', 'zls4009'
+
+ Addresses scanned: -
+
+ Datasheet: Not published
- * Ericsson BMR450, BMR451
+ * Flex BMR450, BMR451
Prefix: 'bmr450', 'bmr451'
@@ -91,17 +107,39 @@ Supported chips:
Datasheet:
-http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
+https://flexpowermodules.com/resources/fpm-techspec-bmr450-digital-pol-regulators-20a
- * Ericsson BMR462, BMR463, BMR464
+ * Flex BMR462, BMR463, BMR464
Prefixes: 'bmr462', 'bmr463', 'bmr464'
Addresses scanned: -
- Datasheet:
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr462
+
+ * Flex BMR465, BMR467
+
+ Prefixes: 'bmr465', 'bmr467'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr465-digital-pol
+
+ * Flex BMR466
+
+ Prefixes: 'bmr466'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr466-8x12
- http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
+ * Flex BMR469
+
+ Prefixes: 'bmr469'
+
+ Addresses scanned: -
+
+ Datasheet: https://flexpowermodules.com/resources/fpm-techspec-bmr4696001
Author: Guenter Roeck <linux@roeck-us.net>
@@ -109,8 +147,8 @@ Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
-This driver supports hardware monitoring for Intersil / Zilker Labs ZL6100 and
-compatible digital DC-DC controllers.
+This driver supports hardware monitoring for Renesas / Intersil / Zilker Labs
+ZL6100 and compatible digital DC-DC controllers.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus.rst and Documentation.hwmon/pmbus-core for details
@@ -147,12 +185,12 @@ Module parameters
delay
-----
-Intersil/Zilker Labs DC-DC controllers require a minimum interval between I2C
-bus accesses. According to Intersil, the minimum interval is 2 ms, though 1 ms
-appears to be sufficient and has not caused any problems in testing. The problem
-is known to affect all currently supported chips. For manual override, the
-driver provides a writeable module parameter, 'delay', which can be used to set
-the interval to a value between 0 and 65,535 microseconds.
+Renesas/Intersil/Zilker Labs DC-DC controllers require a minimum interval
+between I2C bus accesses. According to Intersil, the minimum interval is 2 ms,
+though 1 ms appears to be sufficient and has not caused any problems in testing.
+The problem is known to affect all currently supported chips. For manual override,
+the driver provides a writeable module parameter, 'delay', which can be used
+to set the interval to a value between 0 and 65,535 microseconds.
Sysfs entries
@@ -182,24 +220,32 @@ in2_crit Critical maximum VMON/VDRV voltage.
in2_lcrit_alarm VMON/VDRV voltage critical low alarm.
in2_crit_alarm VMON/VDRV voltage critical high alarm.
- vmon attributes are supported on ZL2004, ZL9101M,
- and ZL9117M only.
+ vmon attributes are supported on ZL2004, ZL8802,
+ ZL9101M, ZL9117M and ZLS4009 only.
-inX_label "vout1"
+inX_label "vout[12]"
inX_input Measured output voltage.
inX_lcrit Critical minimum output Voltage.
inX_crit Critical maximum output voltage.
inX_lcrit_alarm Critical output voltage critical low alarm.
inX_crit_alarm Critical output voltage critical high alarm.
- X is 3 for ZL2004, ZL9101M, and ZL9117M, 2 otherwise.
+ X is 3 for ZL2004, ZL9101M, and ZL9117M,
+ 3, 4 for ZL8802 and 2 otherwise.
+
+curr1_label "iin"
+curr1_input Measured input current.
+
+ iin attributes are supported on ZL8802 only.
+
+currY_label "iout[12]"
+currY_input Measured output current.
+currY_lcrit Critical minimum output current.
+currY_crit Critical maximum output current.
+currY_lcrit_alarm Output current critical low alarm.
+currY_crit_alarm Output current critical high alarm.
-curr1_label "iout1"
-curr1_input Measured output current.
-curr1_lcrit Critical minimum output current.
-curr1_crit Critical maximum output current.
-curr1_lcrit_alarm Output current critical low alarm.
-curr1_crit_alarm Output current critical high alarm.
+ Y is 2, 3 for ZL8802, 1 otherwise.
temp[12]_input Measured temperature.
temp[12]_min Minimum temperature.
diff --git a/Documentation/locking/lockdep-design.rst b/Documentation/locking/lockdep-design.rst
index 9f3cfca9f8a4..82f36cab61bd 100644
--- a/Documentation/locking/lockdep-design.rst
+++ b/Documentation/locking/lockdep-design.rst
@@ -453,9 +453,9 @@ There are simply four block conditions:
Block condition matrix, Y means the row blocks the column, and N means otherwise.
+---+---+---+---+
- | | E | r | R |
+ | | W | r | R |
+---+---+---+---+
- | E | Y | Y | Y |
+ | W | Y | Y | Y |
+---+---+---+---+
| r | Y | Y | N |
+---+---+---+---+
diff --git a/Documentation/networking/device_drivers/ethernet/intel/i40e.rst b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst
index 8a9b18573688..2d3f6bd969a2 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/i40e.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst
@@ -173,7 +173,7 @@ Director rule is added from ethtool (Sideband filter), ATR is turned off by the
driver. To re-enable ATR, the sideband can be disabled with the ethtool -K
option. For example::
- ethtool –K [adapter] ntuple [off|on]
+ ethtool -K [adapter] ntuple [off|on]
If sideband is re-enabled after ATR is re-enabled, ATR remains enabled until a
TCP-IP flow is added. When all TCP-IP sideband rules are deleted, ATR is
@@ -688,7 +688,7 @@ shaper bw_rlimit: for each tc, sets minimum and maximum bandwidth rates.
Totals must be equal or less than port speed.
For example: min_rate 1Gbit 3Gbit: Verify bandwidth limit using network
-monitoring tools such as ifstat or sar –n DEV [interval] [number of samples]
+monitoring tools such as ``ifstat`` or ``sar -n DEV [interval] [number of samples]``
2. Enable HW TC offload on interface::
diff --git a/Documentation/networking/device_drivers/ethernet/intel/iavf.rst b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst
index 52e037b11c97..25330b7b5168 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/iavf.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst
@@ -179,7 +179,7 @@ shaper bw_rlimit: for each tc, sets minimum and maximum bandwidth rates.
Totals must be equal or less than port speed.
For example: min_rate 1Gbit 3Gbit: Verify bandwidth limit using network
-monitoring tools such as ifstat or sar –n DEV [interval] [number of samples]
+monitoring tools such as ``ifstat`` or ``sar -n DEV [interval] [number of samples]``
NOTE:
Setting up channels via ethtool (ethtool -L) is not supported when the
diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
index dabee3729e5a..56490c4c0c07 100644
--- a/Documentation/powerpc/syscall64-abi.rst
+++ b/Documentation/powerpc/syscall64-abi.rst
@@ -109,6 +109,16 @@ auxiliary vector.
scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
+ptrace
+------
+When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
+the system call type that can be used to distinguish between sc and scv 0
+system calls, and the different register conventions can be accounted for.
+
+If the value of (pt_regs.trap & 0xfff0) is 0xc00, then the system call was
+performed with the sc instruction; if it is 0x3000, then the system call was
+performed with the scv 0 instruction.
+
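+As an illustrative sketch only (how the tracer obtains pt_regs, e.g. via
+PTRACE_GETREGS, and the header used here are assumptions), the check can be
+expressed as::
+
+    #include <asm/ptrace.h>    /* powerpc struct pt_regs, including 'trap' */
+
+    /* Returns 1 for an scv 0 system call, 0 for sc, -1 otherwise. */
+    static int syscall_used_scv(const struct pt_regs *regs)
+    {
+        unsigned long trap = regs->trap & 0xfff0;
+
+        if (trap == 0x3000)
+            return 1;
+        if (trap == 0xc00)
+            return 0;
+        return -1;
+    }
+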
vsyscall
========
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
index e5a1be476047..dc2d813b2e79 100644
--- a/Documentation/process/kernel-enforcement-statement.rst
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -1,4 +1,4 @@
-.. _process_statement_kernel:
+.. _process_statement_kernel:
Linux Kernel Enforcement Statement
----------------------------------
diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
index 329d32098af4..b7f98930d38d 100644
--- a/Documentation/riscv/vm-layout.rst
+++ b/Documentation/riscv/vm-layout.rst
@@ -58,6 +58,6 @@ RISC-V Linux Kernel SV39
|
____________________________________________________________|____________________________________________________________
| | | |
- ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules
- ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel, BPF
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
+ ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
__________________|____________|__________________|_________|____________________________________________________________
diff --git a/Documentation/security/tpm/xen-tpmfront.rst b/Documentation/security/tpm/xen-tpmfront.rst
index 00d5b1db227d..31c67522f2ad 100644
--- a/Documentation/security/tpm/xen-tpmfront.rst
+++ b/Documentation/security/tpm/xen-tpmfront.rst
@@ -1,4 +1,4 @@
-=============================
+=============================
Virtual TPM interface for Xen
=============================
diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst
index 882d3cc72cc2..6312968acfe9 100644
--- a/Documentation/spi/pxa2xx.rst
+++ b/Documentation/spi/pxa2xx.rst
@@ -2,43 +2,47 @@
PXA2xx SPI on SSP driver HOWTO
==============================
-This a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
-synchronous serial port into a SPI master controller
+This is a mini HOWTO on the pxa2xx_spi driver. The driver turns a PXA2xx
+synchronous serial port into an SPI master controller
(see Documentation/spi/spi-summary.rst). The driver has the following features
-- Support for any PXA2xx SSP
+- Support for any PXA2xx and compatible SSP.
- SSP PIO and SSP DMA data transfers.
- External and Internal (SSPFRM) chip selects.
- Per slave device (chip) configuration.
- Full suspend, freeze, resume support.
-The driver is built around a "spi_message" fifo serviced by workqueue and a
-tasklet. The workqueue, "pump_messages", drives message fifo and the tasklet
-(pump_transfer) is responsible for queuing SPI transactions and setting up and
-launching the dma/interrupt driven transfers.
+The driver is built around a &struct spi_message FIFO serviced by a kernel
+thread. The kernel thread, spi_pump_messages(), drives the message FIFO and
+is responsible for queuing SPI transactions and setting up and launching
+the DMA or interrupt driven transfers.
Declaring PXA2xx Master Controllers
-----------------------------------
-Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
-"platform device". The master configuration is passed to the driver via a table
-found in include/linux/spi/pxa2xx_spi.h::
+Typically, for a legacy platform, an SPI master is defined in the
+arch/.../mach-*/board-*.c as a "platform device". The master configuration
+is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h::
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
+ ...
};
The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
slave device (chips) attached to this SPI master.
The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
-be used. This caused the driver to acquire two DMA channels: rx_channel and
-tx_channel. The rx_channel has a higher DMA service priority the tx_channel.
+be used. This causes the driver to acquire two DMA channels: an Rx channel and
+a Tx channel. The Rx channel has a higher DMA service priority than the Tx channel.
See the "PXA2xx Developer Manual" section "DMA Controller".
+For the new platforms the description of the controller and peripheral devices
+comes from Device Tree or ACPI.
+
NSSP MASTER SAMPLE
------------------
-Below is a sample configuration using the PXA255 NSSP::
+Below is a sample configuration using the PXA255 NSSP for a legacy platform::
static struct resource pxa_spi_nssp_resources[] = {
[0] = {
@@ -79,9 +83,10 @@ Below is a sample configuration using the PXA255 NSSP::
Declaring Slave Devices
-----------------------
-Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
-using the "spi_board_info" structure found in "linux/spi/spi.h". See
-"Documentation/spi/spi-summary.rst" for additional information.
+Typically, for a legacy platform, each SPI slave (chip) is defined in the
+arch/.../mach-*/board-*.c using the "spi_board_info" structure found in
+"linux/spi/spi.h". See "Documentation/spi/spi-summary.rst" for additional
+information.
Each slave device attached to the PXA must provide slave specific configuration
information via the structure "pxa2xx_spi_chip" found in
@@ -101,9 +106,9 @@ device. All fields are optional.
};
The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
-used to configure the SSP hardware fifo. These fields are critical to the
+used to configure the SSP hardware FIFO. These fields are critical to the
performance of pxa2xx_spi driver and misconfiguration will result in rx
-fifo overruns (especially in PIO mode transfers). Good default values are::
+FIFO overruns (especially in PIO mode transfers). Good default values are::
.tx_threshold = 8,
.rx_threshold = 8,
@@ -118,7 +123,7 @@ use a value of 8. The driver will determine a reasonable default if
dma_burst_size == 0.
The "pxa2xx_spi_chip.timeout" fields is used to efficiently handle
-trailing bytes in the SSP receiver fifo. The correct value for this field is
+trailing bytes in the SSP receiver FIFO. The correct value for this field is
dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
timeouts and must busy-wait any trailing bytes.
@@ -131,19 +136,19 @@ testing.
The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
function for asserting/deasserting a slave device chip select. If the field is
NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
-configured to use SSPFRM instead.
+configured to use GPIO or SSPFRM instead.
NOTE: the SPI driver cannot control the chip select if SSPFRM is used, so the
chipselect is dropped after each spi_transfer. Most devices need chip select
-asserted around the complete message. Use SSPFRM as a GPIO (through cs_control)
+asserted around the complete message. Use SSPFRM as a GPIO (through a descriptor)
to accommodate these chips.
NSSP SLAVE SAMPLE
-----------------
-The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
-"spi_board_info.controller_data" field. Below is a sample configuration using
-the PXA255 NSSP.
+For a legacy platform or in some other cases, the pxa2xx_spi_chip structure
+is passed to the pxa2xx_spi driver in the "spi_board_info.controller_data"
+field. Below is a sample configuration using the PXA255 NSSP.
::
@@ -212,7 +217,9 @@ DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled
-by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA
+by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure.
+For the newer platforms that are known to support DMA, the driver will enable
+it automatically and try it first, with a possible fallback to PIO. The DMA
mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on
@@ -236,5 +243,4 @@ a per "spi_transfer" basis::
THANKS TO
---------
-
David Brownell and others for mentoring the development of this driver.
diff --git a/Documentation/timers/no_hz.rst b/Documentation/timers/no_hz.rst
index c4c70e1aada3..6cadad7c3aad 100644
--- a/Documentation/timers/no_hz.rst
+++ b/Documentation/timers/no_hz.rst
@@ -1,4 +1,4 @@
-======================================
+======================================
NO_HZ: Reducing Scheduling-Clock Ticks
======================================
diff --git a/Documentation/translations/zh_CN/SecurityBugs b/Documentation/translations/zh_CN/SecurityBugs
deleted file mode 100644
index 2d0fffd122ce..000000000000
--- a/Documentation/translations/zh_CN/SecurityBugs
+++ /dev/null
@@ -1,50 +0,0 @@
-Chinese translated version of Documentation/admin-guide/security-bugs.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Chinese maintainer: Harry Wei <harryxiyou@gmail.com>
----------------------------------------------------------------------
-Documentation/admin-guide/security-bugs.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-
-中文版维护者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-中文版翻译者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-中文版校译者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-
-
-以下为正文
----------------------------------------------------------------------
-Linux内核开发者认为安全非常重要。因此,我们想要知道当一个有关于
-安全的漏洞被发现的时候,并且它可能会被尽快的修复或者公开。请把这个安全
-漏洞报告给Linux内核安全团队。
-
-1) 联系
-
-linux内核安全团队可以通过email<security@kernel.org>来联系。这是
-一组独立的安全工作人员,可以帮助改善漏洞报告并且公布和取消一个修复。安
-全团队有可能会从部分的维护者那里引进额外的帮助来了解并且修复安全漏洞。
-当遇到任何漏洞,所能提供的信息越多就越能诊断和修复。如果你不清楚什么
-是有帮助的信息,那就请重温一下admin-guide/reporting-bugs.rst文件中的概述过程。任
-何攻击性的代码都是非常有用的,未经报告者的同意不会被取消,除非它已经
-被公布于众。
-
-2) 公开
-
-Linux内核安全团队的宗旨就是和漏洞提交者一起处理漏洞的解决方案直
-到公开。我们喜欢尽快地完全公开漏洞。当一个漏洞或者修复还没有被完全地理
-解,解决方案没有通过测试或者供应商协调,可以合理地延迟公开。然而,我们
-期望这些延迟尽可能的短些,是可数的几天,而不是几个星期或者几个月。公开
-日期是通过安全团队和漏洞提供者以及供应商洽谈后的结果。公开时间表是从很
-短(特殊的,它已经被公众所知道)到几个星期。作为一个基本的默认政策,我
-们所期望通知公众的日期是7天的安排。
-
-3) 保密协议
-
-Linux内核安全团队不是一个正式的团体,因此不能加入任何的保密协议。
diff --git a/Documentation/usb/gadget_configfs.rst b/Documentation/usb/gadget_configfs.rst
index 158e48dab586..e4566ffb223f 100644
--- a/Documentation/usb/gadget_configfs.rst
+++ b/Documentation/usb/gadget_configfs.rst
@@ -140,7 +140,7 @@ is an arbitrary string allowed in a filesystem, e.g.::
Each function provides its specific set of attributes, with either read-only
or read-write access. Where applicable they need to be written to as
appropriate.
-Please refer to Documentation/ABI/*/configfs-usb-gadget* for more information.
+Please refer to Documentation/ABI/testing/configfs-usb-gadget for more information.
4. Associating the functions with their configurations
------------------------------------------------------
diff --git a/Documentation/usb/mtouchusb.rst b/Documentation/usb/mtouchusb.rst
index d1111b74bf75..5ae1f74fe74b 100644
--- a/Documentation/usb/mtouchusb.rst
+++ b/Documentation/usb/mtouchusb.rst
@@ -1,4 +1,4 @@
-================
+================
mtouchusb driver
================
diff --git a/Documentation/usb/usb-serial.rst b/Documentation/usb/usb-serial.rst
index 8fa7dbd3da9a..69586aeb60bb 100644
--- a/Documentation/usb/usb-serial.rst
+++ b/Documentation/usb/usb-serial.rst
@@ -1,4 +1,4 @@
-==========
+==========
USB serial
==========
diff --git a/Documentation/userspace-api/ioctl/hdio.rst b/Documentation/userspace-api/ioctl/hdio.rst
index 817371bf94e9..6ee8fc88699f 100644
--- a/Documentation/userspace-api/ioctl/hdio.rst
+++ b/Documentation/userspace-api/ioctl/hdio.rst
@@ -7,8 +7,8 @@ Summary of `HDIO_` ioctl calls
November, 2004
This document attempts to describe the ioctl(2) calls supported by
-the HD/IDE layer. These are by-and-large implemented (as of Linux 2.6)
-in drivers/ide/ide.c and drivers/block/scsi_ioctl.c
+the HD/IDE layer. These are by-and-large implemented (as of Linux 5.11)
+in drivers/ata/libata-scsi.c.
ioctl values are listed in <linux/hdreg.h>. As of this writing, they
are as follows:
@@ -17,50 +17,17 @@ are as follows:
======================= =======================================
HDIO_GETGEO get device geometry
- HDIO_GET_UNMASKINTR get current unmask setting
- HDIO_GET_MULTCOUNT get current IDE blockmode setting
- HDIO_GET_QDMA get use-qdma flag
- HDIO_SET_XFER set transfer rate via proc
- HDIO_OBSOLETE_IDENTITY OBSOLETE, DO NOT USE
- HDIO_GET_KEEPSETTINGS get keep-settings-on-reset flag
HDIO_GET_32BIT get current io_32bit setting
- HDIO_GET_NOWERR get ignore-write-error flag
- HDIO_GET_DMA get use-dma flag
- HDIO_GET_NICE get nice flags
HDIO_GET_IDENTITY get IDE identification info
- HDIO_GET_WCACHE get write cache mode on|off
- HDIO_GET_ACOUSTIC get acoustic value
- HDIO_GET_ADDRESS get sector addressing mode
- HDIO_GET_BUSSTATE get the bus state of the hwif
- HDIO_TRISTATE_HWIF execute a channel tristate
- HDIO_DRIVE_RESET execute a device reset
HDIO_DRIVE_TASKFILE execute raw taskfile
HDIO_DRIVE_TASK execute task and special drive command
HDIO_DRIVE_CMD execute a special drive command
- HDIO_DRIVE_CMD_AEB HDIO_DRIVE_TASK
======================= =======================================
ioctls that pass non-pointer values:
======================= =======================================
- HDIO_SET_MULTCOUNT change IDE blockmode
- HDIO_SET_UNMASKINTR permit other irqs during I/O
- HDIO_SET_KEEPSETTINGS keep ioctl settings on reset
HDIO_SET_32BIT change io_32bit flags
- HDIO_SET_NOWERR change ignore-write-error flag
- HDIO_SET_DMA change use-dma flag
- HDIO_SET_PIO_MODE reconfig interface to new speed
- HDIO_SCAN_HWIF register and (re)scan interface
- HDIO_SET_NICE set nice flags
- HDIO_UNREGISTER_HWIF unregister interface
- HDIO_SET_WCACHE change write cache enable-disable
- HDIO_SET_ACOUSTIC change acoustic behavior
- HDIO_SET_BUSSTATE set the bus state of the hwif
- HDIO_SET_QDMA change use-qdma flag
- HDIO_SET_ADDRESS change lba addressing modes
-
- HDIO_SET_IDE_SCSI Set scsi emulation mode on/off
- HDIO_SET_SCSI_IDE not implemented yet
======================= =======================================
@@ -137,143 +104,6 @@ HDIO_GETGEO
-
-HDIO_GET_UNMASKINTR
- get current unmask setting
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_UNMASKINTR, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the drive's current unmask setting
-
-
-
-
-
-HDIO_SET_UNMASKINTR
- permit other irqs during I/O
-
-
- usage::
-
- unsigned long val;
-
- ioctl(fd, HDIO_SET_UNMASKINTR, val);
-
- inputs:
- New value for unmask flag
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-
-HDIO_GET_MULTCOUNT
- get current IDE blockmode setting
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_MULTCOUNT, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current IDE block mode setting. This
- controls how many sectors the drive will transfer per
- interrupt.
-
-
-
-HDIO_SET_MULTCOUNT
- change IDE blockmode
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_MULTCOUNT, val);
-
- inputs:
- New value for IDE block mode setting. This controls how many
- sectors the drive will transfer per interrupt.
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range supported by disk.
- - EBUSY Controller busy or blockmode already set.
- - EIO Drive did not accept new block mode.
-
- notes:
- Source code comments read::
-
- This is tightly woven into the driver->do_special cannot
- touch. DON'T do it again until a total personality rewrite
- is committed.
-
- If blockmode has already been set, this ioctl will fail with
- -EBUSY
-
-
-
-HDIO_GET_QDMA
- get use-qdma flag
-
-
- Not implemented, as of 2.6.8.1
-
-
-
-HDIO_SET_XFER
- set transfer rate via proc
-
-
- Not implemented, as of 2.6.8.1
-
-
-
-HDIO_OBSOLETE_IDENTITY
- OBSOLETE, DO NOT USE
-
-
- Same as HDIO_GET_IDENTITY (see below), except that it only
- returns the first 142 bytes of drive identity information.
-
-
-
HDIO_GET_IDENTITY
get IDE identification info
@@ -308,60 +138,6 @@ HDIO_GET_IDENTITY
-HDIO_GET_KEEPSETTINGS
- get keep-settings-on-reset flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_KEEPSETTINGS, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current "keep settings" flag
-
-
-
- notes:
- When set, indicates that kernel should restore settings
- after a drive reset.
-
-
-
-HDIO_SET_KEEPSETTINGS
- keep ioctl settings on reset
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_SET_KEEPSETTINGS, val);
-
- inputs:
- New value for keep_settings flag
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
HDIO_GET_32BIT
get current io_32bit setting
@@ -387,288 +163,6 @@ HDIO_GET_32BIT
-
-
-HDIO_GET_NOWERR
- get ignore-write-error flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_NOWERR, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current ignore-write-error flag
-
-
-
-
-
-HDIO_GET_DMA
- get use-dma flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_DMA, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current use-dma flag
-
-
-
-
-
-HDIO_GET_NICE
- get nice flags
-
-
- usage::
-
- long nice;
-
- ioctl(fd, HDIO_GET_NICE, &nice);
-
- inputs:
- none
-
-
-
- outputs:
- The drive's "nice" values.
-
-
-
- notes:
- Per-drive flags which determine when the system will give more
- bandwidth to other devices sharing the same IDE bus.
-
- See <linux/hdreg.h>, near symbol IDE_NICE_DSC_OVERLAP.
-
-
-
-
-HDIO_SET_NICE
- set nice flags
-
-
- usage::
-
- unsigned long nice;
-
- ...
- ioctl(fd, HDIO_SET_NICE, nice);
-
- inputs:
- bitmask of nice flags.
-
-
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EPERM Flags other than DSC_OVERLAP and NICE_1 set.
- - EPERM DSC_OVERLAP specified but not supported by drive
-
- notes:
- This ioctl sets the DSC_OVERLAP and NICE_1 flags from values
- provided by the user.
-
- Nice flags are listed in <linux/hdreg.h>, starting with
- IDE_NICE_DSC_OVERLAP. These values represent shifts.
-
-
-
-
-
-HDIO_GET_WCACHE
- get write cache mode on|off
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_WCACHE, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current write cache mode
-
-
-
-
-
-HDIO_GET_ACOUSTIC
- get acoustic value
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_GET_ACOUSTIC, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current acoustic settings
-
-
-
- notes:
- See HDIO_SET_ACOUSTIC
-
-
-
-
-
-HDIO_GET_ADDRESS
- usage::
-
-
- long val;
-
- ioctl(fd, HDIO_GET_ADDRESS, &val);
-
- inputs:
- none
-
-
-
- outputs:
- The value of the current addressing mode:
-
- = ===================
- 0 28-bit
- 1 48-bit
- 2 48-bit doing 28-bit
- 3 64-bit
- = ===================
-
-
-
-HDIO_GET_BUSSTATE
- get the bus state of the hwif
-
-
- usage::
-
- long state;
-
- ioctl(fd, HDIO_SCAN_HWIF, &state);
-
- inputs:
- none
-
-
-
- outputs:
- Current power state of the IDE bus. One of BUSSTATE_OFF,
- BUSSTATE_ON, or BUSSTATE_TRISTATE
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_ADMIN
-
-
-
-
-HDIO_SET_BUSSTATE
- set the bus state of the hwif
-
-
- usage::
-
- int state;
-
- ...
- ioctl(fd, HDIO_SCAN_HWIF, state);
-
- inputs:
- Desired IDE power state. One of BUSSTATE_OFF, BUSSTATE_ON,
- or BUSSTATE_TRISTATE
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_RAWIO
- - EOPNOTSUPP Hardware interface does not support bus power control
-
-
-
-
-HDIO_TRISTATE_HWIF
- execute a channel tristate
-
-
- Not implemented, as of 2.6.8.1. See HDIO_SET_BUSSTATE
-
-
-
-HDIO_DRIVE_RESET
- execute a device reset
-
-
- usage::
-
- int args[3]
-
- ...
- ioctl(fd, HDIO_DRIVE_RESET, args);
-
- inputs:
- none
-
-
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - ENXIO No such device: phy dead or ctl_addr == 0
- - EIO I/O error: reset timed out or hardware error
-
- notes:
-
- - Execute a reset on the device as soon as the current IO
- operation has completed.
-
- - Executes an ATAPI soft reset if applicable, otherwise
- executes an ATA soft reset on the controller.
-
-
-
HDIO_DRIVE_TASKFILE
execute raw taskfile
@@ -1026,14 +520,6 @@ HDIO_DRIVE_TASK
-HDIO_DRIVE_CMD_AEB
- HDIO_DRIVE_TASK
-
-
- Not implemented, as of 2.6.8.1
-
-
-
HDIO_SET_32BIT
change io_32bit flags
@@ -1059,284 +545,3 @@ HDIO_SET_32BIT
- EACCES Access denied: requires CAP_SYS_ADMIN
- EINVAL value out of range [0 3]
- EBUSY Controller busy
-
-
-
-
-HDIO_SET_NOWERR
- change ignore-write-error flag
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_NOWERR, val);
-
- inputs:
- New value for ignore-write-error flag. Used for ignoring
-
-
- WRERR_STAT
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_DMA
- change use-dma flag
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_SET_DMA, val);
-
- inputs:
- New value for use-dma flag
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_PIO_MODE
- reconfig interface to new speed
-
-
- usage::
-
- long val;
-
- ioctl(fd, HDIO_SET_PIO_MODE, val);
-
- inputs:
- New interface speed.
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 255]
- - EBUSY Controller busy
-
-
-
-HDIO_SCAN_HWIF
- register and (re)scan interface
-
-
- usage::
-
- int args[3]
-
- ...
- ioctl(fd, HDIO_SCAN_HWIF, args);
-
- inputs:
-
- ======= =========================
- args[0] io address to probe
-
-
- args[1] control address to probe
- args[2] irq number
- ======= =========================
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_RAWIO
- - EIO Probe failed.
-
- notes:
- This ioctl initializes the addresses and irq for a disk
- controller, probes for drives, and creates /proc/ide
- interfaces as appropriate.
-
-
-
-HDIO_UNREGISTER_HWIF
- unregister interface
-
-
- usage::
-
- int index;
-
- ioctl(fd, HDIO_UNREGISTER_HWIF, index);
-
- inputs:
- index index of hardware interface to unregister
-
-
-
- outputs:
- none
-
-
-
- error returns:
- - EACCES Access denied: requires CAP_SYS_RAWIO
-
- notes:
- This ioctl removes a hardware interface from the kernel.
-
- Currently (2.6.8) this ioctl silently fails if any drive on
- the interface is busy.
-
-
-
-HDIO_SET_WCACHE
- change write cache enable-disable
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_WCACHE, val);
-
- inputs:
- New value for write cache enable
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_ACOUSTIC
- change acoustic behavior
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_ACOUSTIC, val);
-
- inputs:
- New value for drive acoustic settings
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 254]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_QDMA
- change use-qdma flag
-
-
- Not implemented, as of 2.6.8.1
-
-
-
-HDIO_SET_ADDRESS
- change lba addressing modes
-
-
- usage::
-
- int val;
-
- ioctl(fd, HDIO_SET_ADDRESS, val);
-
- inputs:
- New value for addressing mode
-
- = ===================
- 0 28-bit
- 1 48-bit
- 2 48-bit doing 28-bit
- = ===================
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 2]
- - EBUSY Controller busy
- - EIO Drive does not support lba48 mode.
-
-
-HDIO_SET_IDE_SCSI
- usage::
-
-
- long val;
-
- ioctl(fd, HDIO_SET_IDE_SCSI, val);
-
- inputs:
- New value for scsi emulation mode (?)
-
-
-
- outputs:
- none
-
-
-
- error return:
- - EINVAL Called on a partition instead of the whole disk device
- - EACCES Access denied: requires CAP_SYS_ADMIN
- - EINVAL value out of range [0 1]
- - EBUSY Controller busy
-
-
-
-HDIO_SET_SCSI_IDE
- Not implemented, as of 2.6.8.1
diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst
index bd9165241b6c..6efb41cc8072 100644
--- a/Documentation/userspace-api/seccomp_filter.rst
+++ b/Documentation/userspace-api/seccomp_filter.rst
@@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)`` (or ``poll()``) on a
seccomp notification fd to receive a ``struct seccomp_notif``, which contains
five members: the input length of the structure, a unique-per-filter ``id``,
the ``pid`` of the task which triggered this request (which may be 0 if the
-task is in a pid ns not visible from the listener's pid namespace), a ``flags``
-member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
-whether or not the notification is a result of a non-fatal signal, and the
-``data`` passed to seccomp. Userspace can then make a decision based on this
-information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
-response, indicating what should be returned to userspace. The ``id`` member of
-``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
-seccomp_notif``.
+task is in a pid ns not visible from the listener's pid namespace). The
+notification also contains the ``data`` passed to seccomp, and a filters flag.
+The structure should be zeroed out prior to calling the ioctl.
+
+Userspace can then make a decision based on this information about what to do,
+and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
+returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
+be the same ``id`` as in ``struct seccomp_notif``.
It is worth noting that ``struct seccomp_data`` contains the values of register
arguments to the syscall, but does not contain pointers to memory. The task's
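
As a minimal illustration of the notification flow described in the hunk above (not part of this patch), a supervisor's receive/respond cycle might look like the following, assuming ``notify_fd`` is a listener fd obtained with ``SECCOMP_FILTER_FLAG_NEW_LISTENER``; everything beyond the uapi structures and ioctls is illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

static int handle_one_notification(int notify_fd)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	/* The text above asks for the structure to be zeroed before the ioctl. */
	memset(&req, 0, sizeof(req));
	if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
		return -1;

	/*
	 * Decide what to do based on req.data (syscall number and arguments),
	 * req.pid and req.flags, then answer using the same id.
	 */
	memset(&resp, 0, sizeof(resp));
	resp.id = req.id;	/* must match the id from struct seccomp_notif */
	resp.error = 0;		/* errno to inject, 0 for success */
	resp.val = 0;		/* return value seen by the target task */

	if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp) < 0)
		return -1;

	return 0;
}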
diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst
index 5ec8a1902e15..5c081c8c7164 100644
--- a/Documentation/virt/kvm/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/amd-memory-encryption.rst
@@ -22,7 +22,7 @@ to SEV::
[ecx]:
Bits[31:0] Number of encrypted guests supported simultaneously
-If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015
+If support for SEV is present, MSR 0xc001_0010 (MSR_AMD64_SYSCFG) and MSR 0xc001_0015
(MSR_K7_HWCR) can be used to determine if it can be enabled::
0xc001_0010:
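
A minimal user-space sketch of querying the CPUID leaf this document describes (illustrative only, not part of the patch), using the compiler-provided ``cpuid.h`` helper; the bit positions follow the surrounding text:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x8000001f reports SME/SEV capabilities on AMD CPUs. */
	if (!__get_cpuid_count(0x8000001f, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not available");
		return 1;
	}

	printf("SME supported: %u\n", eax & 1);		/* EAX bit 0 */
	printf("SEV supported: %u\n", (eax >> 1) & 1);	/* EAX bit 1 */
	printf("Simultaneous encrypted guests: %u\n", ecx);
	return 0;
}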
diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
index 5bfe28b0728e..20d85daed395 100644
--- a/Documentation/virt/kvm/mmu.rst
+++ b/Documentation/virt/kvm/mmu.rst
@@ -171,8 +171,8 @@ Shadow pages contain the following information:
shadow pages) so role.quadrant takes values in the range 0..3. Each
quadrant maps 1GB virtual address space.
role.access:
- Inherited guest access permissions in the form uwx. Note execute
- permission is positive, not negative.
+ Inherited guest access permissions from the parent ptes in the form uwx.
+ Note execute permission is positive, not negative.
role.invalid:
The page is invalid and should not be used. It is a root page that is
currently pinned (by a cpu hardware register pointing to it); once it is
diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index 5feb3706a7ae..af1b37441e0a 100644
--- a/Documentation/virt/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
@@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
necessary to inform each VCPU to completely refresh the tables. This
request is used for that.
-KVM_REQ_PENDING_TIMER
+KVM_REQ_UNBLOCK
- This request may be made from a timer handler run on the host on behalf
- of a VCPU. It informs the VCPU thread to inject a timer interrupt.
+ This request informs the vCPU to exit kvm_vcpu_block. It is used for
+ example from timer handlers that run on the host on behalf of a vCPU,
+ or in order to update the interrupt routing and ensure that assigned
+ devices will wake up the vCPU.
KVM_REQ_UNHALT
diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
index 03f294a638bd..d3028554b1e9 100644
--- a/Documentation/vm/slub.rst
+++ b/Documentation/vm/slub.rst
@@ -181,7 +181,7 @@ SLUB Debug output
Here is a sample of slub debug output::
====================================================================
- BUG kmalloc-8: Redzone overwritten
+ BUG kmalloc-8: Right Redzone overwritten
--------------------------------------------------------------------
INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
- Redzone 0xc90f6d28: 00 cc cc cc .
- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35 1019.005
+ Redzone (0xc90f6d28): 00 cc cc cc .
+ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
[<c010523d>] dump_trace+0x63/0x1eb
[<c01053df>] show_trace_log_lvl+0x1a/0x2f
diff --git a/Documentation/x86/amd-memory-encryption.rst b/Documentation/x86/amd-memory-encryption.rst
index c48d452d0718..a1940ebe7be5 100644
--- a/Documentation/x86/amd-memory-encryption.rst
+++ b/Documentation/x86/amd-memory-encryption.rst
@@ -53,7 +53,7 @@ CPUID function 0x8000001f reports information related to SME::
system physical addresses, not guest physical
addresses)
-If support for SME is present, MSR 0xc00100010 (MSR_K8_SYSCFG) can be used to
+If support for SME is present, MSR 0xc00100010 (MSR_AMD64_SYSCFG) can be used to
determine if SME is enabled and/or to enable memory encryption::
0xc0010010:
@@ -79,7 +79,7 @@ The state of SME in the Linux kernel can be documented as follows:
The CPU supports SME (determined through CPUID instruction).
- Enabled:
- Supported and bit 23 of MSR_K8_SYSCFG is set.
+ Supported and bit 23 of MSR_AMD64_SYSCFG is set.
- Active:
Supported, Enabled and the Linux kernel is actively applying
@@ -89,7 +89,7 @@ The state of SME in the Linux kernel can be documented as follows:
SME can also be enabled and activated in the BIOS. If SME is enabled and
activated in the BIOS, then all memory accesses will be encrypted and it will
not be necessary to activate the Linux memory encryption support. If the BIOS
-merely enables SME (sets bit 23 of the MSR_K8_SYSCFG), then Linux can activate
+merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
not enable SME, then Linux will not be able to activate memory encryption, even
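
As a rough illustration of the check described above (an assumption, not part of the patch): bit 23 of MSR_AMD64_SYSCFG can be inspected from user space through the ``msr`` driver, assuming the module is loaded and the caller is privileged; the MSR number and bit position come from the surrounding text:

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr driver reads the MSR whose number is given as the offset. */
	if (fd < 0 || pread(fd, &val, sizeof(val), 0xc0010010) != sizeof(val)) {
		perror("read MSR_AMD64_SYSCFG");
		return 1;
	}

	printf("SME enable (bit 23): %u\n", (unsigned int)((val >> 23) & 1));
	close(fd);
	return 0;
}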
diff --git a/MAINTAINERS b/MAINTAINERS
index bd7aff0c120f..2560e8f6abc1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1578,7 +1578,7 @@ F: drivers/clk/sunxi/
ARM/Allwinner sunXi SoC support
M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
-R: Jernej Skrabec <jernej.skrabec@siol.net>
+R: Jernej Skrabec <jernej.skrabec@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
@@ -1618,8 +1618,8 @@ F: Documentation/devicetree/bindings/sound/amlogic*
F: sound/soc/meson/
ARM/Amlogic Meson SoC support
+M: Neil Armstrong <narmstrong@baylibre.com>
M: Kevin Hilman <khilman@baylibre.com>
-R: Neil Armstrong <narmstrong@baylibre.com>
R: Jerome Brunet <jbrunet@baylibre.com>
R: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1816,7 +1816,7 @@ F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c
ARM/CZ.NIC TURRIS SUPPORT
-M: Marek Behun <kabel@kernel.org>
+M: Marek Behún <kabel@kernel.org>
S: Maintained
W: https://www.turris.cz/
F: Documentation/ABI/testing/debugfs-moxtet
@@ -3877,6 +3877,7 @@ L: linux-btrfs@vger.kernel.org
S: Maintained
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
+C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
F: Documentation/filesystems/btrfs.rst
F: fs/btrfs/
@@ -4138,6 +4139,14 @@ S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
+CBS/ETF/TAPRIO QDISCS
+M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+S: Maintained
+L: netdev@vger.kernel.org
+F: net/sched/sch_cbs.c
+F: net/sched/sch_etf.c
+F: net/sched/sch_taprio.c
+
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
@@ -5089,7 +5098,7 @@ S: Maintained
F: drivers/net/fddi/defza.*
DEINTERLACE DRIVERS FOR ALLWINNER H3
-M: Jernej Skrabec <jernej.skrabec@siol.net>
+M: Jernej Skrabec <jernej.skrabec@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -5180,6 +5189,13 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/platform/sti/delta
+DELTA DPS920AB PSU DRIVER
+M: Robert Marko <robert.marko@sartura.hr>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/dps920ab.rst
+F: drivers/hwmon/pmbus/dps920ab.c
+
DENALI NAND DRIVER
L: linux-mtd@lists.infradead.org
S: Orphan
@@ -5237,7 +5253,7 @@ DEVICE DIRECT ACCESS (DAX)
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
F: drivers/dax/
@@ -5569,7 +5585,6 @@ F: drivers/soc/fsl/dpio
DPAA2 ETHERNET DRIVER
M: Ioana Ciornei <ioana.ciornei@nxp.com>
-M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
@@ -5632,14 +5647,14 @@ F: include/linux/power/smartreflex.h
DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE
M: Maxime Ripard <mripard@kernel.org>
M: Chen-Yu Tsai <wens@csie.org>
-R: Jernej Skrabec <jernej.skrabec@siol.net>
+R: Jernej Skrabec <jernej.skrabec@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/sun4i/sun8i*
DRM DRIVER FOR ARM PL111 CLCD
-M: Eric Anholt <eric@anholt.net>
+M: Emma Anholt <emma@anholt.net>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/pl111/
@@ -5719,7 +5734,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/tiny/gm12u320.c
DRM DRIVER FOR HX8357D PANELS
-M: Eric Anholt <eric@anholt.net>
+M: Emma Anholt <emma@anholt.net>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/himax,hx8357d.txt
@@ -6023,7 +6038,7 @@ M: Neil Armstrong <narmstrong@baylibre.com>
M: Robert Foss <robert.foss@linaro.org>
R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
-R: Jernej Skrabec <jernej.skrabec@siol.net>
+R: Jernej Skrabec <jernej.skrabec@gmail.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/bridge/
@@ -6177,7 +6192,7 @@ F: Documentation/devicetree/bindings/display/ti/
F: drivers/gpu/drm/omapdrm/
DRM DRIVERS FOR V3D
-M: Eric Anholt <eric@anholt.net>
+M: Emma Anholt <emma@anholt.net>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
@@ -6185,7 +6200,7 @@ F: drivers/gpu/drm/v3d/
F: include/uapi/drm/v3d_drm.h
DRM DRIVERS FOR VC4
-M: Eric Anholt <eric@anholt.net>
+M: Emma Anholt <emma@anholt.net>
M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
@@ -6938,6 +6953,7 @@ F: net/core/failover.c
FANOTIFY
M: Jan Kara <jack@suse.cz>
R: Amir Goldstein <amir73il@gmail.com>
+R: Matthew Bobrowski <repnop@google.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/notify/fanotify/
@@ -7006,7 +7022,7 @@ M: Dan Williams <dan.j.williams@intel.com>
R: Matthew Wilcox <willy@infradead.org>
R: Jan Kara <jack@suse.cz>
L: linux-fsdevel@vger.kernel.org
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
F: fs/dax.c
F: include/linux/dax.h
@@ -7345,7 +7361,6 @@ F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE SOC SOUND DRIVERS
-M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <festevam@gmail.com>
@@ -8763,22 +8778,6 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-icy.c
-IDE SUBSYSTEM
-M: "David S. Miller" <davem@davemloft.net>
-L: linux-ide@vger.kernel.org
-S: Maintained
-Q: http://patchwork.ozlabs.org/project/linux-ide/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide.git
-F: Documentation/ide/
-F: drivers/ide/
-F: include/linux/ide.h
-
-IDE/ATAPI DRIVERS
-L: linux-ide@vger.kernel.org
-S: Orphan
-F: Documentation/cdrom/ide-cd.rst
-F: drivers/ide/ide-cd*
-
IDEAPAD LAPTOP EXTRAS DRIVER
M: Ike Panhc <ike.pan@canonical.com>
L: platform-driver-x86@vger.kernel.org
@@ -10378,7 +10377,7 @@ LIBNVDIMM BLK: MMIO-APERTURE DRIVER
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
P: Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10389,7 +10388,7 @@ LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
P: Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10399,7 +10398,7 @@ LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
P: Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10407,7 +10406,7 @@ F: drivers/nvdimm/pmem*
LIBNVDIMM: DEVICETREE BINDINGS
M: Oliver O'Halloran <oohall@gmail.com>
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
F: Documentation/devicetree/bindings/pmem/pmem-region.txt
@@ -10418,7 +10417,7 @@ M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Ira Weiny <ira.weiny@intel.com>
-L: linux-nvdimm@lists.01.org
+L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
P: Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10937,7 +10936,7 @@ F: include/linux/mv643xx.h
MARVELL MV88X3310 PHY DRIVER
M: Russell King <linux@armlinux.org.uk>
-M: Marek Behun <marek.behun@nic.cz>
+M: Marek Behún <kabel@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/marvell10g.c
@@ -12180,6 +12179,7 @@ F: drivers/platform/surface/surfacepro3_button.c
MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
M: Maximilian Luz <luzmaximilian@gmail.com>
+L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://github.com/linux-surface/surface-aggregator-module
C: irc://chat.freenode.net/##linux-surface
@@ -12680,9 +12680,9 @@ F: drivers/rtc/rtc-ntxec.c
F: include/linux/mfd/ntxec.h
NETRONOME ETHERNET DRIVERS
-M: Simon Horman <simon.horman@netronome.com>
+M: Simon Horman <simon.horman@corigine.com>
R: Jakub Kicinski <kuba@kernel.org>
-L: oss-drivers@netronome.com
+L: oss-drivers@corigine.com
S: Maintained
F: drivers/net/ethernet/netronome/
@@ -12709,7 +12709,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
-W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
@@ -12754,7 +12753,6 @@ M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
-W: http://www.linuxfoundation.org/en/Net
Q: https://patchwork.kernel.org/project/netdevbpf/list/
B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -12896,8 +12894,10 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
+M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+L: linux-nfc@lists.01.org (subscribers-only)
L: netdev@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/linux/platform_data/nfcmrvl.h
@@ -12908,7 +12908,7 @@ F: net/nfc/
NFC VIRTUAL NCI DEVICE DRIVER
M: Bongsu Jeon <bongsu.jeon@samsung.com>
L: netdev@vger.kernel.org
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Supported
F: drivers/nfc/virtual_ncidev.c
F: tools/testing/selftests/nci/
@@ -13205,9 +13205,8 @@ F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
NXP-NCI NFC DRIVER
-M: Clément Perrochaud <clement.perrochaud@effinnov.com>
R: Charles Gorand <charles.gorand@effinnov.com>
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Supported
F: drivers/nfc/nxp-nci
@@ -14110,6 +14109,7 @@ F: drivers/pci/controller/pci-v3-semi.c
PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/PCI/endpoint/*
@@ -14158,6 +14158,7 @@ F: drivers/pci/controller/pci-xgene-msi.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
R: Rob Herring <robh@kernel.org>
+R: Krzysztof Wilczyński <kw@linux.com>
L: linux-pci@vger.kernel.org
S: Supported
Q: http://patchwork.ozlabs.org/project/linux-pci/list/
@@ -14317,10 +14318,12 @@ PER-CPU MEMORY ALLOCATOR
M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
M: Christoph Lameter <cl@linux.com>
+L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
F: arch/*/include/asm/percpu.h
F: include/linux/percpu*.h
+F: lib/percpu*.c
F: mm/percpu*.c
PER-TASK DELAY ACCOUNTING
@@ -14734,7 +14737,6 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
F: drivers/net/wireless/intersil/prism54/
PROC FILESYSTEM
-R: Alexey Dobriyan <adobriyan@gmail.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
@@ -15815,7 +15817,7 @@ F: include/uapi/linux/rose.h
F: net/rose/
ROTATION DRIVER FOR ALLWINNER A83T
-M: Jernej Skrabec <jernej.skrabec@siol.net>
+M: Jernej Skrabec <jernej.skrabec@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -15945,6 +15947,7 @@ S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/*iucv*
@@ -15955,6 +15958,7 @@ S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
L: linux-s390@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/net/
@@ -16133,7 +16137,7 @@ F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
M: Krzysztof Opasiak <k.opasiak@samsung.com>
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
F: drivers/nfc/s3fwrn5
@@ -16546,6 +16550,7 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
+M: Guvenc Gulce <guvenc@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -17304,6 +17309,12 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-stm32*
+ST STM32 SPI DRIVER
+M: Alain Volmat <alain.volmat@foss.st.com>
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: drivers/spi/spi-stm32.c
+
ST STPDDC60 DRIVER
M: Daniel Nilsson <daniel.nilsson@flex.com>
L: linux-hwmon@vger.kernel.org
@@ -17662,7 +17673,6 @@ R: Mika Westerberg <mika.westerberg@linux.intel.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-designware-*
-F: include/linux/platform_data/i2c-designware.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
M: Jaehoon Chung <jh80.chung@samsung.com>
@@ -18318,7 +18328,7 @@ F: sound/soc/codecs/tas571x*
TI TRF7970A NFC DRIVER
M: Mark Greer <mgreer@animalcreek.com>
L: linux-wireless@vger.kernel.org
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+L: linux-nfc@lists.01.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
F: drivers/nfc/trf7970a.c
@@ -18854,6 +18864,13 @@ S: Maintained
F: drivers/usb/host/isp116x*
F: include/linux/usb/isp116x.h
+USB ISP1760 DRIVER
+M: Rui Miguel Silva <rui.silva@linaro.org>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/isp1760/*
+F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+
USB LAN78XX ETHERNET DRIVER
M: Woojung Huh <woojung.huh@microchip.com>
M: UNGLinuxDriver@microchip.com
@@ -19551,6 +19568,10 @@ F: include/dt-bindings/regulator/
F: include/linux/regulator/
K: regulator_get_optional
+VOLTAGE AND CURRENT REGULATOR IRQ HELPERS
+R: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+F: drivers/regulator/irq_helpers.c
+
VRF
M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
@@ -19998,6 +20019,7 @@ F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XFS FILESYSTEM
+C: irc://irc.oftc.net/xfs
M: Darrick J. Wong <djwong@kernel.org>
M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org
diff --git a/Makefile b/Makefile
index 53d09c414635..0565caea0362 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Frozen Wasteland
+EXTRAVERSION =
+NAME = Opossums on Parade
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -928,6 +928,14 @@ CC_FLAGS_LTO += -fvisibility=hidden
# Limit inlining across translation units to reduce binary size
KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+
+# Check for frame size exceeding threshold during prolog/epilog insertion
+# when using lld < 13.0.0.
+ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+endif
+endif
endif
ifdef CONFIG_LTO
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index 724c4075df40..dd2dd9f0861f 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -25,19 +25,18 @@ CONFIG_PNP=y
CONFIG_ISAPNP=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_ALI15X3=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_CY82C693=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_ALI=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_CYPRESS=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NET_ETHERNET=y
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e41c113c6688..f2861a43a61e 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
*/
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
__ret; \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#undef ____cmpxchg
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 5622578742fd..3000a2e8ee21 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -482,7 +482,7 @@
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
552 common mount_setattr sys_mount_setattr
-553 common quotactl_path sys_quotactl_path
+# 553 reserved for quotactl_path
554 common landlock_create_ruleset sys_landlock_create_ruleset
555 common landlock_add_rule sys_landlock_add_rule
556 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 4392c9c189c4..e47adc97a89b 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -31,7 +31,7 @@ endif
ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
# any kernel headers, and missing the r25 global register
# Can't do unconditionally because of recursive include issues
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5afc79c9b2f5..7a36d79b5b2f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,14 +14,14 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
{
/*
* Independent of hardware support, all of the atomic_xxx() APIs need
@@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
atomic_ops_unlock(flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
#endif
@@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long temp; \
@@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long orig; \
@@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
-#define atomic_fetch_andnot atomic_fetch_andnot
-
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -220,7 +220,7 @@ typedef struct {
#define ATOMIC64_INIT(a) { (a) }
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 val;
@@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return val;
}
-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
/*
* This could have been a simple assignment in "C" but would need
@@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
}
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 a, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
s64 val, orig; \
\
@@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
@@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
+#define arch_atomic64_andnot arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
s64 prev;
@@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
return prev;
}
-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
s64 prev;
@@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
}
/**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 val;
@@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return val;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;
@@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 9b87e162e539..d42917e803e1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif
-#define cmpxchg(ptr, o, n) ({ \
+#define arch_cmpxchg(ptr, o, n) ({ \
(typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n)); \
@@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
* !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
* semantics, and this lock also happens to be used by atomic_*()
*/
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
/*
@@ -116,14 +116,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
*
* Technically the lock is also needed for UP (boils down to irq save/restore)
* but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
* Other way around, xchg is one instruction anyways, so can't be interrupted
* as such
*/
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
unsigned long flags; \
typeof(*(ptr)) old_val; \
@@ -136,14 +136,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#else
-#define xchg(ptr, with) _xchg(ptr, with)
+#define arch_xchg(ptr, with) _xchg(ptr, with)
#endif
/*
* "atomic" variant of xchg()
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
* is incorrect. But here's the rationale:
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
* LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
@@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* can't be clobbered by others. Thus no serialization required when
* atomic_xchg is involved.
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index ad9b7fe4dba3..4a9d33372fe2 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -7,6 +7,18 @@
#include <uapi/asm/page.h>
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define PAGE_MASK_PHYS PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 163641726a2b..5878846f00cf 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -107,8 +107,8 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 2a97e2718a21..2a4ad619abfb 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -33,5 +33,4 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
-
#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
index 95f8a4380e11..7a5449dfcb29 100644
--- a/arch/arc/include/uapi/asm/sigcontext.h
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -18,6 +18,7 @@
*/
struct sigcontext {
struct user_regs_struct regs;
+ struct user_regs_arcv2 v2abi;
};
#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1743506081da..2cb8dfe866b6 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -177,7 +177,7 @@ tracesys:
; Do the Sys Call as we normally would.
; Validate the Sys Call number
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index ecfbc42d3a40..345a0000554c 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->ret = addr;
+ fallthrough;
case 'D':
case 'k':
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index d838d0d57696..3793876f42d9 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
int ret;
/*
- * This is only for old cores lacking LLOCK/SCOND, which by defintion
+ * This is only for old cores lacking LLOCK/SCOND, which by definition
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
- /* Z indicates to userspace if operation succeded */
+ /* Z indicates to userspace if operation succeeded */
regs->status32 &= ~STATUS_Z_MASK;
ret = access_ok(uaddr, sizeof(*uaddr));
@@ -107,7 +107,7 @@ fail:
void arch_cpu_idle(void)
{
- /* Re-enable interrupts <= default irq priority before commiting SLEEP */
+ /* Re-enable interrupts <= default irq priority before committing SLEEP */
const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
void arch_cpu_idle(void)
{
- /* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+ /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
__asm__ __volatile__("sleep 0x3 \n");
}
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index fdbe06c98895..cb2f88502baf 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -61,6 +61,41 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ v2abi.r58 = regs->r58;
+ v2abi.r59 = regs->r59;
+#else
+ v2abi.r58 = v2abi.r59 = 0;
+#endif
+ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+ return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+ regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ regs->r58 = v2abi.r58;
+ regs->r59 = v2abi.r59;
+#endif
+#endif
+ return err;
+}
+
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
if (err)
return -EFAULT;
@@ -259,7 +302,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
regs->r2 = (unsigned long)&sf->uc;
/*
- * small optim to avoid unconditonally calling do_sigaltstack
+ * small optim to avoid unconditionally calling do_sigaltstack
* in sigreturn path, now that we only have rt_sigreturn
*/
magic = MAGIC_SIGALTSTK;
@@ -391,7 +434,7 @@ void do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs)
{
/*
- * ASM glue gaurantees that this is only called when returning to
+ * ASM glue guarantees that this is only called when returning to
* user mode
*/
if (test_thread_flag(TIF_NOTIFY_RESUME))
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 33ce59d91461..e2146a8da195 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -57,7 +57,6 @@ SECTIONS
.init.ramfs : { INIT_RAM_FS }
. = ALIGN(PAGE_SIZE);
- _stext = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 33832e36bdb7..e2ed355438c9 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+ /*
+ * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+ * For HIGHMEM without PAE max_high_pfn should be less than
+ * min_low_pfn to guarantee that these two regions don't overlap.
+ * For PAE case highmem is greater than lowmem, so it is natural
+ * to use max_high_pfn.
+ *
+ * In both cases, holes should be handled by pfn_valid().
+ */
+ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index fac4adc90204..95c649fbc95a 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags)
{
+ unsigned int off;
unsigned long vaddr;
struct vm_struct *area;
- phys_addr_t off, end;
+ phys_addr_t end;
pgprot_t prot = __pgprot(flags);
/* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
/* Mappings have to be page-aligned */
off = paddr & ~PAGE_MASK;
- paddr &= PAGE_MASK;
+ paddr &= PAGE_MASK_PHYS;
size = PAGE_ALIGN(end + 1) - paddr;
/*
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 9bb3c24f3677..9c7c68247289 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
- phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index 7d2c72562c73..9148a01ed6d9 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -105,9 +105,13 @@
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
phy-reset-duration = <20>;
phy-supply = <&sw2_reg>;
- phy-handle = <&ethphy0>;
status = "okay";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
mdio {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
index 236fc205c389..d0768ae429fa 100644
--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
@@ -406,6 +406,18 @@
vin-supply = <&sw1_reg>;
};
+&reg_pu {
+ vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+ vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+ vin-supply = <&sw2_reg>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
index 828cf3e39784..c4e146f3341b 100644
--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
@@ -126,7 +126,7 @@
compatible = "nxp,pca8574";
reg = <0x3a>;
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
index 5339210b63d0..dd8003bd1fc0 100644
--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
+++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
@@ -193,7 +193,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
keep-power-in-suspend;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
no-1-8-v;
broken-cd;
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index e57da0d32b98..e519897fae08 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -351,7 +351,7 @@
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
bus-width = <4>;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
wakeup-source;
no-1-8-v;
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 2aa3ebeb89d7..7a32de51f0fa 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -64,7 +64,6 @@ CONFIG_PARIDE_ON26=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_VENDOR_3COM=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 875a3c28a267..363f1b1b08e3 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -215,8 +215,6 @@ CONFIG_IIO=m
CONFIG_AD5446=m
CONFIG_EEPROM_AT24=m
CONFIG_SENSORS_LIS3_SPI=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 455eb19a5ac1..db8512d9a918 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -22,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -34,7 +34,7 @@
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
@@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
@@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval;
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-#define atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
@@ -250,7 +250,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
);
}
#else
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
s64 tmp;
@@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
#endif
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
@@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
s64 oldval;
unsigned long res;
@@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
return oldval;
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
s64 result;
unsigned long tmp;
@@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
return result;
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
@@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 oldval, newval;
unsigned long tmp;
@@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return oldval;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 8b701f8e175c..4dfe538dfc68 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#define xchg_relaxed(ptr, x) ({ \
+#define arch_xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
@@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0d67ed682e07..bc4ffa7ca04c 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -7,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -42,8 +44,7 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section("__cpuidle_method_of_table") \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 39ff217136d1..6f5d627c44a3 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -21,7 +21,7 @@
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define arch_sync_cmpxchg arch_cmpxchg
#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1a5edf562e85..73ca7797b92f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 020e6deb67c8..237e8aa9fe83 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -12,6 +12,7 @@
#include <linux/suspend.h>
#include <linux/io.h>
+#include "common.h"
#include "hardware.h"
static int mx27_suspend_enter(suspend_state_t state)
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 658c8efb4ca1..a71cf1d189ae 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -10,6 +10,7 @@ config ARCH_WPCM450
bool "Support for WPCM450 BMC (Hermon)"
depends on ARCH_MULTI_V5
select CPU_ARM926T
+ select WPCM450_AIC
select NPCM7XX_TIMER
help
General support for WPCM450 BMC (Hermon).
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 2ee527c00284..1026a816dcc0 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
#ifdef CONFIG_LEDS_TRIGGERS
DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
- /*
- * turn on camera LED
- */
- if (power)
- led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
- else
- led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
- return 0;
-}
-#else
-#define ams_delta_camera_power NULL
#endif
static struct platform_device ams_delta_audio_device = {
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index c40cf5ef8607..977b0b744c22 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
{
if (!IS_BUILTIN(CONFIG_TPS65010))
return -ENOSYS;
-
+
tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
@@ -394,6 +394,8 @@ static void __init h2_init(void)
BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
gpio_direction_input(H2_NAND_RB_GPIO_PIN);
+ gpiod_add_lookup_table(&isp1301_gpiod_table);
+
omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 2c1e2b32b9b3..a745d64d4699 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
irq = INT_7XX_WAKE_UP_REQ;
else if (cpu_is_omap16xx())
irq = INT_1610_WAKE_UP_REQ;
- if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
- NULL))
- pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ else
+ irq = -1;
+
+ if (irq >= 0) {
+ if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+ pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ }
/* Program new power ramp-up time
* (0 for most boards since we don't lower voltage when in deep sleep)
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 418a61ecb827..5e86145db0e2 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
static void n8x0_mmc_callback(void *data, u8 card_mask)
{
+#ifdef CONFIG_MMC_OMAP
int bit, *openp, index;
if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
else
*openp = 0;
-#ifdef CONFIG_MMC_OMAP
omap_mmc_notify_cover_event(mmc_device, index, *openp);
#else
pr_warn("MMC: notify cover event not available\n");
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index ec0d9b094744..bddfc7cd5d40 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
return fpga->irq;
base_irq = platform_get_irq(pdev, 1);
- if (base_irq < 0)
+ if (base_irq < 0) {
base_irq = 0;
+ } else {
+ ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
+ if (ret < 0)
+ return ret;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fpga->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index c7679d7db98b..28e03b5fec00 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -456,7 +456,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index f8f07469d259..a7e54a087b80 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -135,24 +135,18 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
return;
}
-int xen_swiotlb_detect(void)
-{
- if (!xen_domain())
- return 0;
- if (xen_feature(XENFEAT_direct_mapped))
- return 1;
- /* legacy case */
- if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
- return 1;
- return 0;
-}
-
static int __init xen_mm_init(void)
{
struct gnttab_cache_flush cflush;
+ int rc;
+
if (!xen_swiotlb_detect())
return 0;
- xen_swiotlb_init();
+
+ rc = xen_swiotlb_init();
+ /* we can work with the default swiotlb */
+ if (rc < 0 && rc != -EEXIST)
+ return rc;
cflush.op = 0;
cflush.a.dev_bus_addr = 0;
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
index d6465823b281..7b393cfec071 100644
--- a/arch/arm64/Kbuild
+++ b/arch/arm64/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += kernel/ mm/
-obj-$(CONFIG_NET) += net/
+obj-y += kernel/ mm/ net/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6409b47b73e4..7336c1fd0dda 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -165,6 +165,7 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
+ select COMMON_CLK
select MESON_IRQ_GPIO
help
This enables support for the arm64 based Amlogic SoCs
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7ef44478560d..b52481f0605d 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -175,6 +175,9 @@ vdso_install:
$(if $(CONFIG_COMPAT_VDSO), \
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
+archprepare:
+ $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
index 6c309b97587d..e8d31279b7a3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
@@ -46,7 +46,8 @@
eee-broken-100tx;
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
index df212ed5bb94..e65d1c477e2c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
@@ -31,11 +31,10 @@
reg = <0x4>;
eee-broken-1000t;
eee-broken-100tx;
-
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index eca06a0c3cf8..a30249ebffa8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -197,8 +197,8 @@
ddr: memory-controller@1080000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- big-endian;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
};
dcfg: syscon@1e00000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
index 631e01c1b9fd..be1e7d6f0ecb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -88,11 +88,11 @@
pinctrl-0 = <&pinctrl_codec2>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 4dc8383478ee..a08a568c31d9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -45,8 +45,8 @@
reg_12p0_main: regulator-12p0-main {
compatible = "regulator-fixed";
regulator-name = "12V_MAIN";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
regulator-always-on;
};
@@ -77,15 +77,6 @@
regulator-always-on;
};
- reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- vin-supply = <&reg_3p3_main>;
- regulator-name = "GEN_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
reg_usdhc2_vmmc: regulator-vsd-3v3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2>;
@@ -415,11 +406,11 @@
pinctrl-0 = <&pinctrl_codec1>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
index c62ddb9b2ba5..3771144a2ce4 100644
--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
@@ -14,7 +14,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@@ -29,7 +28,6 @@
ports {
port@0 {
- reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index d64fb8b1b86c..46f8dbf68904 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2573,6 +2573,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2628,6 +2632,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 5b05474dc272..d16a4be5ef77 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2419,6 +2419,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2474,6 +2478,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
index e7b4a929bb17..2e3d1981cac4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
@@ -33,7 +33,7 @@
status = "okay";
ports {
- port {
+ port@0 {
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 20fa3caa050e..1aef34447abd 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1823,6 +1823,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 8eb006cbd9af..1f51237ab0a6 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -2709,6 +2709,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2764,6 +2768,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
index 25b87da32eeb..b643d3079db1 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -192,6 +192,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 5c39152e4570..85d66d15465a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -3097,6 +3097,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3152,6 +3156,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3191,6 +3199,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 25d947a81b29..12476e354d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -2761,6 +2761,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2816,6 +2820,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index ab081f14af9a..d9804768425a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -2499,6 +2499,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2554,6 +2558,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 657b20d3533b..dcb9df861d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -2575,6 +2575,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2630,6 +2634,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 5a5d5649332a..e8f6352c3665 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1106,6 +1106,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 1ffa4a995a7a..7b51d464de0e 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1439,6 +1439,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -1478,6 +1482,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index 295d34f1d216..4715e4a4abe0 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -298,8 +298,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 5010f23fafcc..0eaea58f4210 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1970,6 +1970,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index e18747df219f..453ffcef24fa 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -349,7 +349,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1>;
@@ -364,8 +363,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2 3 4>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index b2bcbf23eefd..ca59d1f711f8 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -42,12 +42,12 @@
};
};
- dmss: dmss {
+ dmss: bus@48000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
dma-ranges;
- ranges;
+ ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
ti,sci-dev-id = <25>;
@@ -134,7 +134,7 @@
};
};
- dmsc: dmsc@44043000 {
+ dmsc: system-controller@44043000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
mbox-names = "rx", "tx";
@@ -148,7 +148,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -373,8 +373,9 @@
clocks = <&k3_clks 145 0>;
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
index 99e94dee1bd4..deb19ae5e168 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
@@ -74,8 +74,9 @@
clocks = <&k3_clks 148 0>;
};
- mcu_gpio_intr: interrupt-controller1 {
+ mcu_gpio_intr: interrupt-controller@4210000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x04210000 0x00 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index cb340d1b401f..6cd3131eb9ff 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -433,8 +433,9 @@
#phy-cells = <0>;
};
- intr_main_gpio: interrupt-controller0 {
+ intr_main_gpio: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x00a00000 0x0 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -444,18 +445,19 @@
ti,interrupt-ranges = <0 392 32>;
};
- main-navss {
+ main_navss: bus@30800000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <118>;
- intr_main_navss: interrupt-controller1 {
+ intr_main_navss: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x2000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 0388c02c2203..f5b8ef2f5f77 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -116,11 +116,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index ed42f13e7663..7cb864b4d74a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -6,24 +6,24 @@
*/
&cbass_wakeup {
- dmsc: dmsc {
+ dmsc: system-controller@44083000 {
compatible = "ti,am654-sci";
ti,host-id = <12>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
mbox-names = "rx", "tx";
mboxes= <&secure_proxy_main 11>,
<&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x44083000 0x1000>;
+
k3_pds: power-controller {
compatible = "ti,sci-pm-domain";
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -69,8 +69,9 @@
power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
};
- intr_wkup_gpio: interrupt-controller2 {
+ intr_wkup_gpio: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x42200000 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 9e87fb313a54..eddb2ffb93ca 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -85,12 +85,6 @@
gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
};
};
-
- clk_ov5640_fixed: clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
};
&wkup_pmx0 {
@@ -287,23 +281,6 @@
pinctrl-names = "default";
pinctrl-0 = <&main_i2c1_pins_default>;
clock-frequency = <400000>;
-
- ov5640: camera@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
-
- clocks = <&clk_ov5640_fixed>;
- clock-names = "xclk";
-
- port {
- csi2_cam0: endpoint {
- remote-endpoint = <&csi2_phy0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
- };
- };
-
};
&main_i2c2 {
@@ -496,14 +473,6 @@
};
};
-&csi2_0 {
- csi2_phy0: endpoint {
- remote-endpoint = <&csi2_cam0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
-};
-
&mcu_cpsw {
pinctrl-names = "default";
pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index f86c493a44f1..19fea8adbcff 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -68,8 +68,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -85,9 +86,12 @@
#size-cells = <2>;
ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
ti,sci-dev-id = <199>;
+ dma-coherent;
+ dma-ranges;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x310e0000 0x00 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 5e74e43822c3..5663fe3ea466 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index c2aa45a3ac79..3bcafe4c1742 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -76,8 +76,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -87,18 +88,19 @@
ti,interrupt-ranges = <8 392 56>;
};
- main-navss {
+ main_navss: bus@30000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <199>;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index d56e3475aee7..5e825e4d0306 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -249,11 +250,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 07ac208edc89..26889dbfe904 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -5,3 +5,5 @@ generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += set_memory.h
generic-y += user.h
+
+generated-y += cpucaps.h
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index b56a4b2bc248..c9979273d389 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -223,6 +223,4 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define ARCH_ATOMIC
-
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2175ec0004ed..451e11e5fd23 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
* This insanity brought to you by speculative system register reads,
* out-of-order memory accesses, sequence locks and Thomas Gleixner.
*
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
*/
#define arch_counter_enforce_ordering(val) do { \
u64 tmp, _val = (val); \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
deleted file mode 100644
index b0c5eda0498f..000000000000
--- a/arch/arm64/include/asm/cpucaps.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm64/include/asm/cpucaps.h
- *
- * Copyright (C) 2016 ARM Ltd.
- */
-#ifndef __ASM_CPUCAPS_H
-#define __ASM_CPUCAPS_H
-
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_SPECTRE_V3A 14
-#define ARM64_HAS_CNP 15
-#define ARM64_HAS_NO_FPSIMD 16
-#define ARM64_WORKAROUND_REPEAT_TLBI 17
-#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
-#define ARM64_WORKAROUND_858921 19
-#define ARM64_WORKAROUND_CAVIUM_30115 20
-#define ARM64_HAS_DCPOP 21
-#define ARM64_SVE 22
-#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_SPECTRE_V2 24
-#define ARM64_HAS_RAS_EXTN 25
-#define ARM64_WORKAROUND_843419 26
-#define ARM64_HAS_CACHE_IDC 27
-#define ARM64_HAS_CACHE_DIC 28
-#define ARM64_HW_DBM 29
-#define ARM64_SPECTRE_V4 30
-#define ARM64_MISMATCHED_CACHE_TYPE 31
-#define ARM64_HAS_STAGE2_FWB 32
-#define ARM64_HAS_CRC32 33
-#define ARM64_SSBS 34
-#define ARM64_WORKAROUND_1418040 35
-#define ARM64_HAS_SB 36
-#define ARM64_WORKAROUND_SPECULATIVE_AT 37
-#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
-#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
-#define ARM64_HAS_GENERIC_AUTH_ARCH 40
-#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
-#define ARM64_HAS_IRQ_PRIO_MASKING 42
-#define ARM64_HAS_DCPODP 43
-#define ARM64_WORKAROUND_1463225 44
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
-#define ARM64_WORKAROUND_1542419 47
-#define ARM64_HAS_E0PD 48
-#define ARM64_HAS_RNG 49
-#define ARM64_HAS_AMU_EXTN 50
-#define ARM64_HAS_ADDRESS_AUTH 51
-#define ARM64_HAS_GENERIC_AUTH 52
-#define ARM64_HAS_32BIT_EL1 53
-#define ARM64_BTI 54
-#define ARM64_HAS_ARMv8_4_TTL 55
-#define ARM64_HAS_TLB_RANGE 56
-#define ARM64_MTE 57
-#define ARM64_WORKAROUND_1508412 58
-#define ARM64_HAS_LDAPR 59
-#define ARM64_KVM_PROTECTED_MODE 60
-#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61
-#define ARM64_HAS_EPAN 62
-
-#define ARM64_NCAPS 63
-
-#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index cf8df032b9c3..5e9b33cbac51 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -63,6 +63,7 @@
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
#ifndef __ASSEMBLY__
@@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..01b9857757f2 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+ return test_bit(feature, vcpu->arch.features);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 7859749d6628..5dab69d2c22b 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -893,8 +893,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 1cb39c0803a4..e720148232a0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
}
- if (run->immediate_exit)
- return -EINTR;
-
vcpu_load(vcpu);
+ if (run->immediate_exit) {
+ ret = -EINTR;
+ goto out;
+ }
+
kvm_sigset_activate(vcpu);
ret = 1;
@@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_sigset_deactivate(vcpu);
+out:
+ /*
+ * In the unlikely event that we are returning to userspace
+ * with pending exceptions or PC adjustment, commit these
+ * adjustments in order to give userspace a consistent view of
+ * the vcpu state. Note that this relies on __kvm_adjust_pc()
+ * being preempt-safe on VHE.
+ */
+ if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_INCREMENT_PC)))
+ kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
vcpu_put(vcpu);
return ret;
}
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 73629094f903..11541b94b328 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_pc(vcpu) = vect_offset;
}
-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
}
}
}
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+ kvm_inject_exception(vcpu);
+ vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_EXCEPT_MASK);
+ } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+ kvm_skip_instr(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+ }
+}
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 61716359035d..4fdfeabefeb4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -13,8 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@@ -44,22 +42,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
}
/*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
- kvm_inject_exception(vcpu);
- vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
- KVM_ARM64_EXCEPT_MASK);
- } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
- kvm_skip_instr(vcpu);
- vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
- }
-}
-
-/*
* Skip an instruction while host sysregs are live.
* Assumes host is always 64-bit.
*/
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f36420a80474..1632f001f4ed 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}
+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+ __kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
__kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
+ HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
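
For context, kvm_call_hyp(__kvm_adjust_pc, vcpu) on the host side becomes an SMCCC call carrying __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, which the nVHE hypervisor resolves through the host_hcall[] table above. A simplified sketch of that dispatch (the real handler in this file is handle_host_hcall(); error reporting and register shuffling are omitted):

static void dispatch_host_hcall(struct kvm_cpu_context *host_ctxt,
				unsigned long id)
{
	hcall_t hfn;

	if (id >= ARRAY_SIZE(host_hcall))
		return;			/* real code flags an error to the host */

	hfn = host_hcall[id];
	hfn(host_ctxt);			/* e.g. handle___kvm_adjust_pc() */
}
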
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index e342f7f4f4fb..4b60c0056c04 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -23,8 +23,8 @@
extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;
-struct hyp_pool host_s2_mem;
-struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_mem;
+static struct hyp_pool host_s2_dev;
/*
* Copies of the host's CPU features registers holding sanitized values.
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 7488f53b0aa2..a3d3a275344e 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -17,7 +17,6 @@
#include <nvhe/trap_handler.h>
struct hyp_pool hpool;
-struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -27,6 +26,7 @@ static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_mem_pgt_base;
static void *host_s2_dev_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static int divide_memory_pool(void *virt, unsigned long size)
{
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e9f6ea704d07..f7af9688c1f7 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 7b8f7db5c1ed..b3229924d243 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c5d1f3c87dbd..c10207fed2f3 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1156,13 +1156,13 @@ out_unlock:
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);
- return 0;
+ return false;
}
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1170,7 +1170,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
kvm_pfn_t pfn = pte_pfn(range->pte);
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
WARN_ON(range->end - range->start != 1);
@@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
PAGE_SIZE, __pfn_to_phys(pfn),
KVM_PGTABLE_PROT_R, NULL);
- return 0;
+ return false;
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t pte;
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@@ -1213,7 +1213,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
+ return false;
return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT);
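
These handlers now return bool, and the value tells the generic MMU-notifier glue whether it still has to flush TLBs; arm64 invalidates as part of its own stage-2 unmap, so false is the right answer. A rough sketch of the caller side (assumed shape of the generic code, heavily simplified):

	if (kvm_unmap_gfn_range(kvm, &range))
		kvm_flush_remote_tlbs(kvm);	/* arm64 returns false: already flushed */
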
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 956cdc240148..d37ebee085cf 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *tmp;
+ bool is32bit;
+ int i;
+
+ is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+ return false;
+
+ /* Check that the vcpus are either all 32bit or all 64bit */
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+ return false;
+ }
+
+ return true;
+}
+
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
+ if (!vcpu_allowed_register_width(vcpu)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
- ret = -EINVAL;
- goto out;
- }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
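
The new vcpu_allowed_register_width() check means a VM can no longer mix 32-bit and 64-bit vcpus. A hypothetical userspace sequence that now fails (file descriptors and the target value are placeholders; error handling omitted):

	struct kvm_vcpu_init init = { .target = KVM_ARM_TARGET_GENERIC_V8 };

	ioctl(vcpu0_fd, KVM_ARM_VCPU_INIT, &init);	/* 64-bit vcpu0: ok */

	init.features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
	ioctl(vcpu1_fd, KVM_ARM_VCPU_INIT, &init);	/* mixed widths: -EINVAL */
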
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 76ea2800c33e..1a7968ad078c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write,
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+ trace_trap_reg(__func__, rd->CRm, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
return true;
}
@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
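
The switch from rd->reg to rd->CRm works because, in the DBGBVR<n>_EL1 / DBGBCR<n>_EL1 / DBGWVR<n>_EL1 / DBGWCR<n>_EL1 encodings, the breakpoint/watchpoint index n is carried in the CRm field, while .reg is not populated by these descriptors. A one-line sketch of the relationship (helper name invented for illustration):

static inline unsigned int dbg_index(const struct sys_reg_desc *rd)
{
	return rd->CRm;		/* e.g. DBGBVR3_EL1 has CRm == 3 */
}
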
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..6d44c028d1c9 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
{
struct page *page = pte_page(pte);
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
sync_icache_aliases(page_address(page), page_size(page));
+ set_bit(PG_dcache_clean, &page->flags);
+ }
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 16a2b2b1c54d..e55409caaee3 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -43,6 +43,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
+#include <asm/xen/swiotlb-xen.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@@ -482,7 +483,7 @@ void __init mem_init(void)
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
- else
+ else if (!xen_swiotlb_detect())
swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6dd9369e3ea0..89b66ef43a0f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
- if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
+ if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE))
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0a48191534ff..97d7bcd8d4f2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -447,6 +447,18 @@ SYM_FUNC_START(__cpu_setup)
mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
msr_s SYS_GCR_EL1, x10
+ /*
+ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+ * RGSR_EL1.SEED must be non-zero for IRG to produce
+ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+ * must initialize it.
+ */
+ mrs x10, CNTVCT_EL0
+ ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
+ csinc x10, x10, xzr, ne
+ lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
+ msr_s SYS_RGSR_EL1, x10
+
/* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr
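
A C rendering of the new assembly sequence may help; read_cntvct() and write_rgsr_el1() below are invented stand-ins for the mrs/msr instructions:

static void init_rgsr_seed(void)
{
	u64 seed = read_cntvct() & SYS_RGSR_EL1_SEED_MASK;

	if (!seed)			/* the csinc guarantees a non-zero seed */
		seed = 1;

	write_rgsr_el1(seed << SYS_RGSR_EL1_SEED_SHIFT);
}
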
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
new file mode 100644
index 000000000000..932b4fe5c768
--- /dev/null
+++ b/arch/arm64/tools/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+gen := arch/$(ARCH)/include/generated
+kapi := $(gen)/asm
+
+kapi-hdrs-y := $(kapi)/cpucaps.h
+
+targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y))
+
+PHONY += kapi
+
+kapi: $(kapi-hdrs-y) $(gen-y)
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+quiet_cmd_gen_cpucaps = GEN $@
+ cmd_gen_cpucaps = mkdir -p $(dir $@) && \
+ $(AWK) -f $(filter-out $(PHONY),$^) > $@
+
+$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
+ $(call if_changed,gen_cpucaps)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
new file mode 100644
index 000000000000..21fbdda7086e
--- /dev/null
+++ b/arch/arm64/tools/cpucaps
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Internal CPU capabilities constants, keep this list sorted
+
+BTI
+HAS_32BIT_EL0
+HAS_32BIT_EL1
+HAS_ADDRESS_AUTH
+HAS_ADDRESS_AUTH_ARCH
+HAS_ADDRESS_AUTH_IMP_DEF
+HAS_AMU_EXTN
+HAS_ARMv8_4_TTL
+HAS_CACHE_DIC
+HAS_CACHE_IDC
+HAS_CNP
+HAS_CRC32
+HAS_DCPODP
+HAS_DCPOP
+HAS_E0PD
+HAS_EPAN
+HAS_GENERIC_AUTH
+HAS_GENERIC_AUTH_ARCH
+HAS_GENERIC_AUTH_IMP_DEF
+HAS_IRQ_PRIO_MASKING
+HAS_LDAPR
+HAS_LSE_ATOMICS
+HAS_NO_FPSIMD
+HAS_NO_HW_PREFETCH
+HAS_PAN
+HAS_RAS_EXTN
+HAS_RNG
+HAS_SB
+HAS_STAGE2_FWB
+HAS_SYSREG_GIC_CPUIF
+HAS_TLB_RANGE
+HAS_VIRT_HOST_EXTN
+HW_DBM
+KVM_PROTECTED_MODE
+MISMATCHED_CACHE_TYPE
+MTE
+SPECTRE_V2
+SPECTRE_V3A
+SPECTRE_V4
+SSBS
+SVE
+UNMAP_KERNEL_AT_EL0
+WORKAROUND_834220
+WORKAROUND_843419
+WORKAROUND_845719
+WORKAROUND_858921
+WORKAROUND_1418040
+WORKAROUND_1463225
+WORKAROUND_1508412
+WORKAROUND_1542419
+WORKAROUND_CAVIUM_23154
+WORKAROUND_CAVIUM_27456
+WORKAROUND_CAVIUM_30115
+WORKAROUND_CAVIUM_TX2_219_PRFM
+WORKAROUND_CAVIUM_TX2_219_TVM
+WORKAROUND_CLEAN_CACHE
+WORKAROUND_DEVICE_LOAD_ACQUIRE
+WORKAROUND_NVIDIA_CARMEL_CNP
+WORKAROUND_QCOM_FALKOR_E1003
+WORKAROUND_REPEAT_TLBI
+WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/tools/gen-cpucaps.awk b/arch/arm64/tools/gen-cpucaps.awk
new file mode 100755
index 000000000000..00c9e72a200a
--- /dev/null
+++ b/arch/arm64/tools/gen-cpucaps.awk
@@ -0,0 +1,40 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-cpucaps.awk: arm64 cpucaps header generator
+#
+# Usage: awk -f gen-cpucaps.awk cpucaps
+
+# Log an error and terminate
+function fatal(msg) {
+ print "Error at line " NR ": " msg > "/dev/stderr"
+ exit 1
+}
+
+# skip blank lines and comment lines
+/^$/ { next }
+/^#/ { next }
+
+BEGIN {
+ print "#ifndef __ASM_CPUCAPS_H"
+ print "#define __ASM_CPUCAPS_H"
+ print ""
+ print "/* Generated file - do not edit */"
+ cap_num = 0
+ print ""
+}
+
+/^[vA-Z0-9_]+$/ {
+ printf("#define ARM64_%-30s\t%d\n", $0, cap_num++)
+ next
+}
+
+END {
+ printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num)
+ print ""
+ print "#endif /* __ASM_CPUCAPS_H */"
+}
+
+# Any lines not handled by previous rules are unexpected
+{
+ fatal("unhandled statement")
+}
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index dabc8e46ce7b..d1bef11f8dc9 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
#define __cmpxchg_relaxed(ptr, old, new, size) \
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_release_fence(); \
- __ret = cmpxchg_relaxed(ptr, o, n); \
+ __ret = arch_cmpxchg_relaxed(ptr, o, n); \
__smp_acquire_fence(); \
__ret; \
})
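
This csky change, like the hexagon, ia64, m68k and mips renames further down, is part of moving every architecture to arch_-prefixed atomics so the generic instrumented wrappers can own the plain names. Roughly (a sketch, not the verbatim asm-generic macro), the layering looks like:

#define cmpxchg(ptr, ...)						\
({									\
	typeof(ptr) __ai_ptr = (ptr);					\
	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr));	\
	arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
})
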
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 60ee7f0d60a8..e23139c8fc0d 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
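
The generic-y line makes the build synthesize asm/cmpxchg.h instead of carrying the hand-written copy deleted just below. The generated header is assumed to be the usual one-line redirect:

/* arch/h8300/include/generated/asm/cmpxchg.h (build-generated, assumed) */
#include <asm-generic/cmpxchg.h>
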
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644
index a990d151f163..000000000000
--- a/arch/h8300/include/asm/atomic.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-#include <asm/irqflags.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter; \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- \
- flags = arch_local_irq_save(); \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
-}
-
-ATOMIC_OP_RETURN(add, +=)
-ATOMIC_OP_RETURN(sub, -=)
-
-#define ATOMIC_OPS(op, c_op) \
- ATOMIC_OP(op, c_op) \
- ATOMIC_FETCH_OP(op, c_op)
-
-ATOMIC_OPS(and, &=)
-ATOMIC_OPS(or, |=)
-ATOMIC_OPS(xor, ^=)
-ATOMIC_OPS(add, +=)
-ATOMIC_OPS(sub, -=)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- arch_local_irq_restore(flags);
- return ret;
-}
-
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- arch_local_irq_restore(flags);
- return ret;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-#endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644
index c64bb38ce242..000000000000
--- a/arch/h8300/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
- sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x,
- volatile void *ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("mov.b %2,%0\n\t"
- "mov.b %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 2:
- __asm__ __volatile__
- ("mov.w %2,%0\n\t"
- "mov.w %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 4:
- __asm__ __volatile__
- ("mov.l %2,%0\n\t"
- "mov.l %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- default:
- tmp = 0;
- }
- local_irq_restore(flags);
- return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 4ab895d7111f..6e94f8d04146 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -14,7 +14,7 @@
/* Normal writes in our arch don't clear lock reservations */
-static inline void atomic_set(atomic_t *v, int new)
+static inline void arch_atomic_set(atomic_t *v, int new)
{
asm volatile(
"1: r6 = memw_locked(%0);\n"
@@ -26,26 +26,26 @@ static inline void atomic_set(atomic_t *v, int new)
);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/**
- * atomic_read - reads a word, atomically
+ * arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
- * atomic_xchg - atomic
+ * arch_atomic_xchg - atomic
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/**
- * atomic_cmpxchg - atomic compare-and-exchange values
+ * arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
@@ -61,7 +61,7 @@ static inline void atomic_set(atomic_t *v, int new)
*
* "old" is "expected" old val, __oldval is actual old value
*/
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
@@ -81,7 +81,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
}
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int output; \
\
@@ -97,7 +97,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
\
@@ -114,7 +114,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int output, val; \
\
@@ -148,7 +148,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -157,7 +157,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -180,6 +180,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 92b8a02e588a..cdb705e1496a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
/*
@@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* variable casting.
*/
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index f267d956458f..266c429b9137 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
@@ -36,7 +36,7 @@ ia64_atomic_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \
@@ -51,7 +51,7 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \
@@ -74,7 +74,7 @@ ATOMIC_OPS(sub, -)
#define __ia64_atomic_const(i) 0
#endif
-#define atomic_add_return(i,v) \
+#define arch_atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -82,7 +82,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_add(__ia64_aar_i, v); \
})
-#define atomic_sub_return(i,v) \
+#define arch_atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -90,7 +90,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
-#define atomic_fetch_add(i,v) \
+#define arch_atomic_fetch_add(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -98,7 +98,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})
-#define atomic_fetch_sub(i,v) \
+#define arch_atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -110,13 +110,13 @@ ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
-#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
-#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
-#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
+#define arch_atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define arch_atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
-#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
-#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
-#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
+#define arch_atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
+#define arch_atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
@@ -131,7 +131,7 @@ ia64_atomic64_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \
@@ -146,7 +146,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \
@@ -159,7 +159,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
-#define atomic64_add_return(i,v) \
+#define arch_atomic64_add_return(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -167,7 +167,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_add(__ia64_aar_i, v); \
})
-#define atomic64_sub_return(i,v) \
+#define arch_atomic64_sub_return(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -175,7 +175,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
-#define atomic64_fetch_add(i,v) \
+#define arch_atomic64_fetch_add(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -183,7 +183,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
-#define atomic64_fetch_sub(i,v) \
+#define arch_atomic64_fetch_sub(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -195,29 +195,29 @@ ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)
-#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
-#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
-#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
-#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
-#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
-#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic64_cmpxchg(v, old, new) \
- (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic_add(i,v) (void)atomic_add_return((i), (v))
-#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
+#define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v))
+#define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v))
-#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
-#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
+#define arch_atomic64_add(i,v) (void)arch_atomic64_add_return((i), (v))
+#define arch_atomic64_sub(i,v) (void)arch_atomic64_sub_return((i), (v))
#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..94ef84429843
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+#include <uapi/asm/cmpxchg.h>
+
+#define arch_xchg(ptr, x) \
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+
+#define arch_cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg64
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 5d90307fd6e0..926c6cb1e029 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_CMPXCHG_H
-#define _ASM_IA64_CMPXCHG_H
+#ifndef _UAPI_ASM_IA64_CMPXCHG_H
+#define _UAPI_ASM_IA64_CMPXCHG_H
/*
* Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
__xchg_result; \
})
+#ifndef __KERNEL__
#define xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+#endif
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
* we had to back-pedal and keep the "legacy" behavior of a full fence :-(
*/
+#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg_local cmpxchg
#define cmpxchg64_local cmpxchg64
+#endif
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do { \
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_IA64_CMPXCHG_H */
+#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 1ee8e736a48e..bb11fe4c875a 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -363,7 +363,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 44f9b5216ac9..261a0f57cc9a 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -875,16 +875,8 @@ static const struct resource atari_scsi_tt_rsrc[] __initconst = {
#define FALCON_IDE_BASE 0xfff00000
static const struct resource atari_falconide_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_MEM,
- .start = FALCON_IDE_BASE,
- .end = FALCON_IDE_BASE + 0x39,
- },
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MFP_FSCSI,
- .end = IRQ_MFP_FSCSI,
- },
+ DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38),
+ DEFINE_RES_MEM(FALCON_IDE_BASE + 0x38, 2),
};
int __init atari_platform_init(void)
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 59b727b69357..4fe26d54627e 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -323,11 +323,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -344,6 +339,11 @@ CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
CONFIG_SCSI_ZORRO_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 9cc9f1a06516..21b2990fe9af 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -324,10 +324,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -339,6 +335,10 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_ATARI_SCSI=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 4e68b72d9c50..b03300df13fc 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -315,11 +315,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -332,6 +327,10 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index d31896293c39..e2c8368e2231 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -344,15 +344,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
-CONFIG_BLK_DEV_FALCON_IDE=y
-CONFIG_BLK_DEV_MAC_IDE=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -376,6 +367,13 @@ CONFIG_MVME147_SCSI=y
CONFIG_MVME16x_SCSI=y
CONFIG_BVME6000_SCSI=y
CONFIG_SUN3X_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 664025a0f6a4..514e2e8cddbd 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -314,10 +314,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -328,6 +324,10 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 756c5cc58f94..8637bf8a2f65 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,8 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/*
* The ColdFire parts cannot do some immediate to memory operations,
@@ -30,7 +30,7 @@
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \
@@ -38,7 +38,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#ifdef CONFIG_RMW_INSNS
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -48,12 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -63,14 +63,14 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return tmp; \
}
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -83,7 +83,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -120,27 +120,27 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_inc(atomic_t *v)
+static inline void arch_atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static inline void atomic_dec(atomic_t *v)
+static inline void arch_atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static inline int atomic_dec_and_test(atomic_t *v)
+static inline int arch_atomic_dec_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_dec_and_test atomic_dec_and_test
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-static inline int atomic_dec_and_test_lt(atomic_t *v)
+static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
char c;
__asm__ __volatile__(
@@ -150,49 +150,49 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
return c != 0;
}
-static inline int atomic_inc_and_test(atomic_t *v)
+static inline int arch_atomic_inc_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_inc_and_test atomic_inc_and_test
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
+ prev = arch_atomic_read(v);
if (prev == old)
- atomic_set(v, new);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
- atomic_set(v, new);
+ prev = arch_atomic_read(v);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
#endif /* !CONFIG_RMW_INSNS */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("subl %2,%1; seq %0"
@@ -200,9 +200,9 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_sub_and_test atomic_sub_and_test
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-static inline int atomic_add_negative(int i, atomic_t *v)
+static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
@@ -210,6 +210,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_add_negative atomic_add_negative
+#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index a4aa82021d3b..e8ca4b0ccefa 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -76,11 +76,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
extern unsigned long __invalid_cmpxchg_size(volatile void *,
unsigned long, unsigned long, int);
@@ -118,14 +118,14 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#else
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index a5d358855878..8ed6ac14d99f 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
if (mm->context != NO_CONTEXT)
return;
- while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+ while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
atomic_inc(&nr_free_contexts);
steal_context();
}
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index a4b7ee1df211..8f215e79e70e 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
/* _sigfault._perf */
- BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 0dd019dc2136..79c2d24c89dd 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -442,7 +442,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 1cdac959bd91..5d16f9b47aa9 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -933,13 +933,15 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
},
};
-static const struct resource mac_ide_quadra_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_quadra_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_F),
};
-static const struct resource mac_ide_pb_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_pb_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_C),
};
@@ -949,7 +951,7 @@ static const struct resource mac_pata_baboon_rsrc[] __initconst = {
DEFINE_RES_IRQ(IRQ_BABOON_1),
};
-static const struct pata_platform_info mac_pata_baboon_data __initconst = {
+static const struct pata_platform_info mac_pata_data __initconst = {
.ioport_shift = 2,
};
@@ -1067,17 +1069,19 @@ int __init mac_platform_init(void)
switch (macintosh_config->ide_type) {
case MAC_IDE_QUADRA:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_quadra_rsrc, ARRAY_SIZE(mac_pata_quadra_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_PB:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_pb_rsrc, ARRAY_SIZE(mac_pata_pb_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_BABOON:
platform_device_register_resndata(NULL, "pata_platform", -1,
mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc),
- &mac_pata_baboon_data, sizeof(mac_pata_baboon_data));
+ &mac_pata_data, sizeof(mac_pata_data));
break;
}
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index d6a423875231..5caf1e5be1c2 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -286,14 +286,39 @@ static int q40_set_rtc_pll(struct rtc_pll_info *pll)
return -EINVAL;
}
-static __init int q40_add_kbd_device(void)
-{
- struct platform_device *pdev;
+#define PCIDE_BASE1 0x1f0
+#define PCIDE_BASE2 0x170
+#define PCIDE_CTL 0x206
+
+static const struct resource q40_pata_rsrc_0[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE1 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE1 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE1, 8),
+ DEFINE_RES_IO(PCIDE_BASE1 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(14),
+};
+static const struct resource q40_pata_rsrc_1[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE2 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE2 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE2, 8),
+ DEFINE_RES_IO(PCIDE_BASE2 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(15),
+};
+
+static __init int q40_platform_init(void)
+{
if (!MACH_IS_Q40)
return -ENODEV;
- pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
+ platform_device_register_simple("q40kbd", -1, NULL, 0);
+
+ platform_device_register_simple("atari-falcon-ide", 0, q40_pata_rsrc_0,
+ ARRAY_SIZE(q40_pata_rsrc_0));
+
+ platform_device_register_simple("atari-falcon-ide", 1, q40_pata_rsrc_1,
+ ARRAY_SIZE(q40_pata_rsrc_1));
+
+ return 0;
}
-arch_initcall(q40_add_kbd_device);
+arch_initcall(q40_platform_init);
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 29b0e557aa7c..a055f5dbe00a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
deleted file mode 100644
index 41e9aff23a62..000000000000
--- a/arch/microblaze/include/asm/atomic.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_ATOMIC_H
-#define _ASM_MICROBLAZE_ATOMIC_H
-
-#include <asm/cmpxchg.h>
-#include <asm-generic/atomic.h>
-#include <asm-generic/atomic64.h>
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- unsigned long flags;
- int res;
-
- local_irq_save(flags);
- res = v->counter - 1;
- if (res >= 0)
- v->counter = res;
- local_irq_restore(flags);
-
- return res;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-
-#endif /* _ASM_MICROBLAZE_ATOMIC_H */
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
deleted file mode 100644
index 3523b51aab36..000000000000
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_CMPXCHG_H
-#define _ASM_MICROBLAZE_CMPXCHG_H
-
-#ifndef CONFIG_SMP
-# include <asm-generic/cmpxchg.h>
-#endif
-
-#endif /* _ASM_MICROBLAZE_CMPXCHG_H */
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 2ac716984ca2..b11395a20c20 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -448,7 +448,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index b184baa4e56a..f175bce2987f 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -18,6 +18,7 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
#include <asm/war.h>
#define ATOMIC_OPS(pfx, type) \
-static __always_inline type pfx##_read(const pfx##_t *v) \
+static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \
return READ_ONCE(v->counter); \
} \
\
-static __always_inline void pfx##_set(pfx##_t *v, type i) \
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \
WRITE_ONCE(v->counter, i); \
} \
\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+static __always_inline type \
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \
- return cmpxchg(&v->counter, o, n); \
+ return arch_cmpxchg(&v->counter, o, n); \
} \
\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \
- return xchg(&v->counter, n); \
+ return arch_xchg(&v->counter, n); \
}
ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
#endif
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \
type temp; \
\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
}
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
}
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
int temp, result; \
\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif
#undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
* The function returns the old value of @v minus @i.
*/
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
}
ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
#undef ATOMIC_SIP_OP
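
For context on why every operation above gains an arch_ prefix (a sketch, not text from this patch): the generic instrumentation layer, in the style of include/asm-generic/atomic-instrumented.h, defines the unprefixed names itself, inserts the sanitizer hooks, and then calls down into the raw per-architecture implementation, roughly:

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
	return arch_atomic_add_return(i, v);		/* MIPS ll/sc body above */
}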
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index ed8f3f3c4304..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
}
}
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg_local(ptr, old, new) \
+#define arch_cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \
__cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr))))
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
- __res = cmpxchg_local((ptr), (old), (new)); \
+ __res = arch_cmpxchg_local((ptr), (old), (new)); \
\
/* \
* In the Loongson3 workaround case __cmpxchg_asm() already \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
})
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#else
# include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
# ifdef CONFIG_SMP
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
return ret;
}
-# define cmpxchg64(ptr, o, n) ({ \
+# define arch_cmpxchg64(ptr, o, n) ({ \
unsigned long long __old = (__typeof__(*(ptr)))(o); \
unsigned long long __new = (__typeof__(*(ptr)))(n); \
__typeof__(*(ptr)) __res; \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
})
# else /* !CONFIG_SMP */
-# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index f93aa5ee2e2e..3481ed4c117b 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -3,6 +3,9 @@
*
*/
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
#ifndef _ASSEMBLER_
struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
/* Polling period in count cycles for secondary CPU's */
#define LAUNCHPERIOD 10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
do {
old32 = load32;
new32 = (load32 & ~mask) | (val << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
*/
old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32)
return old;
}
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 5e0096657251..9220909526f9 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -381,7 +381,7 @@
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
442 n32 mount_setattr sys_mount_setattr
-443 n32 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 n32 landlock_create_ruleset sys_landlock_create_ruleset
445 n32 landlock_add_rule sys_landlock_add_rule
446 n32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 9974f5f8e49b..9cd1c34f31b5 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-443 n64 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 39d6e71e57b6..d560c467a8c6 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -430,7 +430,7 @@
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 o32 mount_setattr sys_mount_setattr
-443 o32 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 o32 landlock_create_ruleset sys_landlock_create_ruleset
445 o32 landlock_add_rule sys_landlock_add_rule
446 o32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index de03838b343b..a9b72eacfc0b 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -37,7 +37,7 @@
*/
notrace void arch_local_irq_disable(void)
{
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
return flags;
}
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);
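
The switch to the _notrace preempt helpers above follows the usual rule for notrace code: a function the tracer itself may call must not use preempt_disable()/preempt_enable(), since those can themselves be traced and would recurse straight back into this function. A minimal sketch of the pattern, with read_hw_flags() as a hypothetical stand-in for the real inline asm:

notrace unsigned long example_save_flags(void)
{
	unsigned long flags;

	preempt_disable_notrace();	/* no tracer hook on this path */
	flags = read_hw_flags();	/* hypothetical stand-in for the asm block */
	preempt_enable_notrace();

	return flags;
}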
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index a7bf0c80371c..830ab91e574f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = PVA(_PAGE_PRESENT);
- protection_map[5] = PVA(_PAGE_PRESENT);
- protection_map[6] = PVA(_PAGE_PRESENT);
- protection_map[7] = PVA(_PAGE_PRESENT);
+ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PM(_PAGE_PRESENT);
+ protection_map[5] = PM(_PAGE_PRESENT);
+ protection_map[6] = PM(_PAGE_PRESENT);
+ protection_map[7] = PM(_PAGE_PRESENT);
protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
_PAGE_NO_READ);
- protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = PVA(_PAGE_PRESENT);
- protection_map[13] = PVA(_PAGE_PRESENT);
- protection_map[14] = PVA(_PAGE_PRESENT);
- protection_map[15] = PVA(_PAGE_PRESENT);
+ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PM(_PAGE_PRESENT);
+ protection_map[13] = PM(_PAGE_PRESENT);
+ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
-#undef _PVA
#undef PM
void cpu_cache_init(void)
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 0c5de07da097..0135376c5de5 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
__iomem void *plat_of_remap_node(const char *node)
{
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index b589fac39b92..326167e4783a 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -13,7 +13,7 @@
/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int tmp, old; \
\
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
@@ -83,16 +85,18 @@ ATOMIC_OP(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
-#define atomic_and atomic_and
-#define atomic_or atomic_or
-#define atomic_xor atomic_xor
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_add arch_atomic_add
+#define arch_atomic_sub arch_atomic_sub
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_xor arch_atomic_xor
/*
* Atomically add a to v->counter as long as v is not already u.
@@ -100,7 +104,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,8 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#include <asm-generic/atomic.h>
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#include <asm/cmpxchg.h>
+
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_OPENRISC_ATOMIC_H */
diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
new file mode 100644
index 000000000000..7538294721be
--- /dev/null
+++ b/arch/openrisc/include/asm/barrier.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index f9cd43a39d72..79fd16162ccb 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
(unsigned long)(o), \
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
}
}
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
(__typeof__(*(ptr))) __xchg((ptr), \
(unsigned long)(with), \
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 2416a9f91533..c6f9e7b9f7cb 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+ of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)
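
The of_node_put() added above balances the reference taken when the cpu node was looked up earlier in calibrate_delay(). A minimal sketch of the same pattern (the function name and compatible string are made up for illustration):

#include <linux/of.h>

static int example_read_cpu_freq(u32 *freq)
{
	struct device_node *np;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-cpu");
	if (!np)
		return -ENODEV;

	ret = of_property_read_u32(np, "clock-frequency", freq);
	of_node_put(np);	/* drop the reference of_find_*() handed us */

	return ret;
}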
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d5641198b90c..cfef61a7b6c2 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
- struct memblock_region *region;
v = PAGE_OFFSET;
@@ -121,7 +120,7 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
- region->base, region->base + region->size);
+ start, end);
}
}
@@ -129,7 +128,6 @@ void __init paging_init(void)
{
extern void tlb_init(void);
- unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -145,8 +143,6 @@ void __init paging_init(void)
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
- end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
-
map_ram();
zone_sizes_init();
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 21b375c67e53..dd5a299ada69 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values.
*/
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define ATOMIC_OP(op, c_op) \
-static __inline__ void atomic_##op(int i, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
#define ATOMIC64_INIT(i) { (i) }
#define ATOMIC64_OP(op, c_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
\
@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
}
#define ATOMIC64_OP_RETURN(op, c_op) \
-static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP
static __inline__ void
-atomic64_set(atomic64_t *v, s64 i)
+arch_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
static __inline__ s64
-atomic64_read(const atomic64_t *v)
+arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* !CONFIG_64BIT */
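
A simplified sketch of the hashed-lock scheme the parisc code above relies on (the EX_-prefixed names are illustrative, not the real macros from asm/atomic.h): with no native atomic read-modify-write instruction, every atomic_t is serialized by a spinlock chosen by hashing the variable's address, which is why each operation is bracketed by _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore().

#define EX_ATOMIC_HASH_SIZE	4
static arch_spinlock_t ex_atomic_hash[EX_ATOMIC_HASH_SIZE];

#define EX_ATOMIC_HASH(v) \
	(&ex_atomic_hash[(((unsigned long)(v)) >> 4) & (EX_ATOMIC_HASH_SIZE - 1)])

#define ex_atomic_spin_lock_irqsave(v, f) do {		\
	arch_spinlock_t *s = EX_ATOMIC_HASH(v);		\
	local_irq_save(f);				\
	arch_spin_lock(s);				\
} while (0)

#define ex_atomic_spin_unlock_irqrestore(v, f) do {	\
	arch_spinlock_t *s = EX_ATOMIC_HASH(v);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while (0)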
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 84ee232278a6..5f274be10567 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -98,7 +98,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
- return __cmpxchg_local_generic(ptr, old, new_, size);
+ return __generic_cmpxchg_local(ptr, old, new_, size);
}
}
@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
-#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
#endif /* _ASM_PARISC_CMPXCHG_H_ */
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 5ac80b83d745..aabc37f8cae3 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -440,7 +440,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index c2717f31925a..ccda0a91abf0 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -122,7 +122,15 @@
};
/include/ "pq3-i2c-0.dtsi"
+ i2c@3000 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-i2c-1.dtsi"
+ i2c@3100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-duart-0.dtsi"
/include/ "pq3-espi-0.dtsi"
spi0: spi@7000 {
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 872e4485dc3f..ddc018d42252 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -371,7 +371,23 @@
};
/include/ "qoriq-i2c-0.dtsi"
+ i2c@118000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@118100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-i2c-1.dtsi"
+ i2c@119000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@119100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 61c6e8b200e8..a1732a79e92a 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -23,7 +23,7 @@
#define __atomic_release_fence() \
__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
@@ -32,13 +32,13 @@ static __inline__ int atomic_read(const atomic_t *v)
return t;
}
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int a, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
@@ -53,7 +53,7 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
@@ -109,16 +109,16 @@ ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-static __inline__ void atomic_inc(atomic_t *v)
+static __inline__ void arch_atomic_inc(atomic_t *v)
{
int t;
@@ -131,9 +131,9 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
int t;
@@ -149,7 +149,7 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-static __inline__ void atomic_dec(atomic_t *v)
+static __inline__ void arch_atomic_dec(atomic_t *v)
{
int t;
@@ -162,9 +162,9 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
int t;
@@ -180,17 +180,20 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
return t;
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because
@@ -199,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* would be a surprise).
*/
static __always_inline bool
-atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -229,7 +232,7 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
@@ -250,7 +253,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return t;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -259,7 +262,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
+static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
int t1, t2;
@@ -280,14 +283,14 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
return t1;
}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
+#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
int t;
@@ -307,13 +310,13 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64 atomic64_read(const atomic64_t *v)
+static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
@@ -322,13 +325,13 @@ static __inline__ s64 atomic64_read(const atomic64_t *v)
return t;
}
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
+static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -344,7 +347,7 @@ static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -362,7 +365,7 @@ atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
s64 res, t; \
\
@@ -386,11 +389,11 @@ atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
@@ -401,16 +404,16 @@ ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOPIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-static __inline__ void atomic64_inc(atomic64_t *v)
+static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
s64 t;
@@ -423,9 +426,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_inc atomic64_inc
+#define arch_atomic64_inc arch_atomic64_inc
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -441,7 +444,7 @@ static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-static __inline__ void atomic64_dec(atomic64_t *v)
+static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
s64 t;
@@ -454,9 +457,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_dec atomic64_dec
+#define arch_atomic64_dec arch_atomic64_dec
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -472,14 +475,14 @@ static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
return t;
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 t;
@@ -498,16 +501,19 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic64_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/**
* atomic64_fetch_add_unless - add unless the number is a given value
@@ -518,7 +524,7 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 t;
@@ -539,7 +545,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return t;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
* atomic_inc64_not_zero - increment unless the number is zero
@@ -548,7 +554,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
s64 t1, t2;
@@ -569,7 +575,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
+#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index cf091c4c22e5..05f246c0e36e 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -185,14 +185,14 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x;
}
-#define xchg_local(ptr,x) \
+#define arch_xchg_local(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_local((ptr), \
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -467,7 +467,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -476,7 +476,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +484,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -493,7 +493,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -502,29 +502,29 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 443050906018..e3b29eda8074 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -448,6 +448,9 @@
*/
long plpar_hcall_norets(unsigned long opcode, ...);
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
/**
* plpar_hcall: - Make a pseries hypervisor call
* @opcode: The hypervisor call to make.
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 44cde2e129b8..59f704408d65 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -153,8 +153,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
*/
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
- if (user_mode(regs))
- kuep_unlock();
}
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
@@ -222,6 +220,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
+ regs->nip < (unsigned long)__end_interrupts) {
+ // Kernel code running below __end_interrupts is
+ // implicitly soft-masked.
+ regs->softe = IRQS_ALL_DISABLED;
+ }
+
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 2d5c6bec2b4f..93ce3ec25387 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -50,7 +50,7 @@ l_yes:
1098: nop; \
.pushsection __jump_table, "aw"; \
.long 1098b - ., LABEL - .; \
- FTR_ENTRY_LONG KEY; \
+ FTR_ENTRY_LONG KEY - .; \
.popsection
#endif
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1e83359f286b..7f2e90db2050 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -51,6 +51,7 @@
/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
#include <linux/mmu_notifier.h>
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index 5d1726bb28e7..bcb7b5f917be 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
return be32_to_cpu(yield_count);
}
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
+ * has yielded. So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
- plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+ plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
static inline void prod_cpu(int cpu)
{
- plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+ plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
static inline void yield_to_any(void)
{
- plpar_hcall_norets(H_CONFER, -1, 0);
+ plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index ece84a430701..83e0f701ebc6 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
static inline long cede_processor(void)
{
- return plpar_hcall_norets(H_CEDE);
+ /*
+ * We cannot call tracepoints inside RCU idle regions which
+ * means we must not trace H_CEDE.
+ */
+ return plpar_hcall_norets_notrace(H_CEDE);
}
static inline long extended_cede_processor(unsigned long latency_hint)
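
Both hunks above switch call sites to plpar_hcall_norets_notrace() because the traced entry point fires hcall tracepoints, and those are unsafe here: the tracing code takes spinlocks, and tracepoints may not be used from RCU-idle contexts such as CEDE. A rough sketch of the split, with trace_hcall_entry()/trace_hcall_exit() as hypothetical placeholders for the real tracepoint plumbing:

/* raw entry point: performs the hypervisor call, no tracepoints */
long plpar_hcall_norets_notrace(unsigned long opcode, ...);

/* traced entry point: only safe where tracing itself is safe */
static inline long example_traced_hcall(unsigned long opcode)
{
	long rc;

	trace_hcall_entry(opcode);		/* hypothetical hook */
	rc = plpar_hcall_norets_notrace(opcode);
	trace_hcall_exit(opcode, rc);		/* hypothetical hook */

	return rc;
}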
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 33fa5dd8ee6a..714a35f0d425 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
pgd_t *pgdir = init_mm.pgd;
return __find_linux_pte(pgdir, ea, NULL, hshift);
}
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;
+
+ /*
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
+ */
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
+}
+
/*
* This is what we should always use. Any other lockless page table lookup needs
* careful audit against THP split.
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9c9ab2746168..b476a685f066 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
+#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
-{
- return !(regs->ccr & 0x10000000);
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
- if (is_syscall_success(regs))
- return regs->gpr[3];
- else
- return -regs->gpr[3];
-}
-
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
-{
- regs->gpr[3] = rc;
-}
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
@@ -235,6 +217,31 @@ static __always_inline void set_trap_norestart(struct pt_regs *regs)
regs->trap |= 0x1;
}
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+ else
+ return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return regs->gpr[3];
+
+ if (is_syscall_success(regs))
+ return regs->gpr[3];
+ else
+ return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->gpr[3] = rc;
+}
+
#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 07318bc63e3d..b676c4fb90fd 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -37,7 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
u32 val = 0;
- if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+ if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index fd1b518eed17..ba0f88f3a30d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- /*
- * If the system call failed,
- * regs->gpr[3] contains a positive ERRORCODE.
- */
- return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ if (trap_is_scv(regs)) {
+ unsigned long error = regs->gpr[3];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+ } else {
+ /*
+ * If the system call failed,
+ * regs->gpr[3] contains a positive ERRORCODE.
+ */
+ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ }
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- /*
- * In the general case it's not obvious that we must deal with CCR
- * here, as the syscall exit path will also do that for us. However
- * there are some places, eg. the signal code, which check ccr to
- * decide if the value in r3 is actually an error.
- */
- if (error) {
- regs->ccr |= 0x10000000L;
- regs->gpr[3] = error;
+ if (trap_is_scv(regs)) {
+ regs->gpr[3] = (long) error ?: val;
} else {
- regs->ccr &= ~0x10000000L;
- regs->gpr[3] = val;
+ /*
+ * In the general case it's not obvious that we must deal with
+ * CCR here, as the syscall exit path will also do that for us.
+ * However there are some places, eg. the signal code, which
+ * check ccr to decide if the value in r3 is actually an error.
+ */
+ if (error) {
+ regs->ccr |= 0x10000000L;
+ regs->gpr[3] = error;
+ } else {
+ regs->ccr &= ~0x10000000L;
+ regs->gpr[3] = val;
+ }
}
}
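
The distinction drawn above is between the two powerpc system-call ABIs: 'scv' returns a negative errno directly in r3 (tested with IS_ERR_VALUE()), while the older 'sc' flags failure in CR0.SO and leaves a positive errno in r3. A small user-space style sketch of that decode logic (illustrative only):

#include <stdbool.h>

#define MAX_ERRNO 4095

static bool is_err_value(unsigned long x)	/* mirrors the kernel's IS_ERR_VALUE() */
{
	return x >= (unsigned long)-MAX_ERRNO;
}

/* returns 0 on success, otherwise the negative errno */
static long decode_syscall_error(unsigned long r3, bool is_scv, bool cr0_so)
{
	if (is_scv)
		return is_err_value(r3) ? (long)r3 : 0;	/* r3 already holds -errno */

	return cr0_so ? -(long)r3 : 0;			/* 'sc': positive errno, negate */
}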
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a09e4240c5b1..22c79ab40006 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -157,7 +157,7 @@ do { \
"2: lwz%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
EX_TABLE(2b, %l2) \
- : "=r" (x) \
+ : "=&r" (x) \
: "m" (*addr) \
: \
: label)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f24cd53ff26e..3bbdcc86d01b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
- pte_t *ptep;
- unsigned long pa;
- int hugepage_shift;
-
- /*
- * We won't find hugepages here(this is iomem). Hence we are not
- * worried about _PAGE_SPLITTING/collapse. Also we will not hit
- * page table free, because of init_mm.
- */
- ptep = find_init_mm_pte(token, &hugepage_shift);
- if (!ptep)
- return token;
-
- pa = pte_pfn(*ptep);
-
- /* On radix we can do hugepage mappings for io, so handle that */
- if (!hugepage_shift)
- hugepage_shift = PAGE_SHIFT;
-
- pa <<= PAGE_SHIFT;
- pa |= token & ((1ul << hugepage_shift) - 1);
- return pa;
+ return ppc_find_vmap_phys(token);
}
/*
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 7c3654b0d0f4..f1ae710274bc 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -340,6 +340,12 @@ ret_from_mc_except:
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
bne masked_interrupt_book3e_##n
+/*
+ * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
+ * called, because that does SAVE_NVGPRS which must see the original register
+ * values, otherwise the scratch values might be restored when exiting the
+ * interrupt.
+ */
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
std r15,PACA_EXGEN+EX_R15(r13)
@@ -535,6 +541,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x300)
b storage_fault_common
@@ -544,6 +554,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x400)
b storage_fault_common
@@ -557,6 +571,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x600)
b alignment_more /* no room, go out of line */
@@ -565,10 +583,10 @@ __end_interrupts:
NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
- EXCEPTION_COMMON(0x700)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXGEN+EX_R14(r13)
+ EXCEPTION_COMMON(0x700)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1)
b interrupt_return
@@ -725,11 +743,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_CRIT(0xd00)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
+ EXCEPTION_COMMON_CRIT(0xd00)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
REST_NVGPRS(r1)
b interrupt_return
@@ -796,11 +814,11 @@ kernel_dbg_exc:
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_DBG(0xd08)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
+ EXCEPTION_COMMON_DBG(0xd08)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
REST_NVGPRS(r1)
b interrupt_return
@@ -931,11 +949,7 @@ masked_interrupt_book3e_0x2c0:
* original values stashed away in the PACA
*/
storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
bl do_page_fault
b interrupt_return
@@ -944,11 +958,7 @@ storage_fault_common:
* continues here.
*/
alignment_more:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
bl alignment_exception
REST_NVGPRS(r1)
b interrupt_return
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index e4559f8914eb..e0938ba298f2 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -34,9 +34,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
syscall_fn f;
kuep_lock();
-#ifdef CONFIG_PPC32
- kuap_save_and_lock(regs);
-#endif
regs->orig_gpr3 = r3;
@@ -427,6 +424,7 @@ again:
/* Restore user access locks last */
kuap_user_restore(regs);
+ kuep_unlock();
return ret;
}
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 51bbaae94ccc..c877f074d174 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
#ifdef CONFIG_PPC_INDIRECT_MMIO
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
- unsigned hugepage_shift;
struct iowa_bus *bus;
int token;
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
- pte_t *ptep;
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
- /*
- * We won't find huge pages here (iomem). Also can't hit
- * a page table free due to init_mm
- */
- ptep = find_init_mm_pte(vaddr, &hugepage_shift);
- if (ptep == NULL)
- paddr = 0;
- else {
- WARN_ON(hugepage_shift);
- paddr = pte_pfn(*ptep) << PAGE_SHIFT;
- }
+
+ paddr = ppc_find_vmap_phys(vaddr);
+
bus = iowa_pci_find(vaddr, paddr);
if (bus == NULL)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 57d6b85e9b96..2af89a5e379f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
unsigned int order;
unsigned int nio_pages, io_order;
struct page *page;
- size_t size_io = size;
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
- nio_pages = size_io >> tbl->it_page_shift;
- io_order = get_iommu_order(size_io, tbl);
+ nio_pages = size >> tbl->it_page_shift;
+ io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
if (tbl) {
- size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
- unsigned int nio_pages = size_io >> tbl->it_page_shift;
+ unsigned int nio_pages;
+ size = PAGE_ALIGN(size);
+ nio_pages = size >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 3f700830169f..c64a5feaebbe 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
int ret = 0;
struct kprobe *prev;
struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
ret = -EINVAL;
- } else if (ppc_inst_prefixed(prefix)) {
+ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+ ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
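
The kprobes hunk above stops unconditionally reading the word before p->addr: a prefixed instruction cannot cross a 64-byte boundary, so it can never cross a page boundary either, and when p->addr is page-aligned the previous word lives in a different page that may not even be mapped. The new condition only peeks at p->addr - 1 when the address is not at the start of a page. A small stand-alone sketch of just that test (PAGE_SIZE and the helper name are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Only look at the previous word if it lives in the same page as 'addr':
 * an address at a page boundary cannot be the suffix of a prefixed
 * instruction, and reading the preceding page could fault. */
static bool may_check_prev_word(uintptr_t addr)
{
	return (addr & ~PAGE_MASK) != 0;
}

int main(void)
{
	printf("%d\n", may_check_prev_word(0x10000000));	/* 0: page aligned */
	printf("%d\n", may_check_prev_word(0x10000004));	/* 1: safe to peek */
	return 0;
}
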
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 8b2c1a8553a0..cfc03e016ff2 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -356,13 +356,16 @@ static void __init setup_legacy_serial_console(int console)
static int __init ioremap_legacy_serial_console(void)
{
- struct legacy_serial_info *info = &legacy_serial_infos[legacy_serial_console];
- struct plat_serial8250_port *port = &legacy_serial_ports[legacy_serial_console];
+ struct plat_serial8250_port *port;
+ struct legacy_serial_info *info;
void __iomem *vaddr;
if (legacy_serial_console < 0)
return 0;
+ info = &legacy_serial_infos[legacy_serial_console];
+ port = &legacy_serial_ports[legacy_serial_console];
+
if (!info->early_addr)
return 0;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index b779d25761cf..e42b85e4f1aa 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
- early_ioremap_setup();
-
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ early_ioremap_setup();
+
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index f4aafa337c2e..1f07317964e4 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -166,9 +166,9 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
-#define unsafe_copy_fpr_to_user(to, task, label) do { } while (0)
+#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label;} while (0)
-#define unsafe_copy_fpr_from_user(task, from, label) do { } while (0)
+#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label;} while (0)
static inline unsigned long
copy_fpr_to_user(void __user *to, struct task_struct *task)
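
The two stub macros above gain "if (0) goto label;" so that the error label a caller passes in is still referenced when the FP code is configured out; an empty do { } while (0) body would leave that label unused and break builds with -Werror=unused-label. A minimal compilable illustration of the trick (all names invented):

#include <stdio.h>

/* Stub variant: never jumps, but still "uses" the label, so the caller's
 * error label does not trigger -Wunused-label when the real body is
 * compiled out. */
#define unsafe_do_thing(x, label) do { (void)(x); if (0) goto label; } while (0)

static int caller(int x)
{
	unsafe_do_thing(x, fail);
	return 0;
fail:
	return -1;
}

int main(void)
{
	printf("%d\n", caller(1));
	return 0;
}
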
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index dca66481d0c2..f9e1f5428b9e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -902,6 +902,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
user_write_access_end();
+ /* Save the siginfo outside of the unsafe block. */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ goto badframe;
+
/* Make sure signal handler doesn't get spurious FP exceptions */
tsk->thread.fp_state.fpscr = 0;
@@ -915,11 +919,6 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
regs->nip = (unsigned long) &frame->tramp[0];
}
-
- /* Save the siginfo outside of the unsafe block. */
- if (copy_siginfo_to_user(&frame->info, &ksig->info))
- goto badframe;
-
/* Allocate a dummy caller frame for the signal handler. */
newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
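
The move above keeps copy_siginfo_to_user() outside the user_write_access block, as before, but performs it earlier: the copy can fail with -EFAULT, so doing it before fpscr and regs->nip are rewritten (presumably the motivation here) means a failed setup bails out through badframe without having already modified the task's register state. The general shape of that ordering rule, reduced to a toy example (all names invented):

#include <errno.h>
#include <stdio.h>

struct regs { unsigned long nip; };

/* Stand-in for a copy to user space that can fail. */
static int copy_out(int should_fail)
{
	return should_fail ? -EFAULT : 0;
}

/* Do the fallible copy first; only mutate the visible register state once
 * nothing can fail any more, so the error path leaves 'r' untouched. */
static int setup_frame(struct regs *r, unsigned long new_nip, int should_fail)
{
	if (copy_out(should_fail))
		return -EFAULT;

	r->nip = new_nip;
	return 0;
}

int main(void)
{
	struct regs r = { .nip = 0x100 };

	printf("ret=%d nip=%#lx\n", setup_frame(&r, 0x200, 1), r.nip);
	printf("ret=%d nip=%#lx\n", setup_frame(&r, 0x200, 0), r.nip);
	return 0;
}
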
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 2e68fbb57cc6..8f052ff4058c 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 2d9193cd73be..c63e263312a4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -840,7 +840,7 @@ bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
kvm_unmap_radix(kvm, range->slot, gfn);
} else {
for (gfn = range->start; gfn < range->end; gfn++)
- kvm_unmap_rmapp(kvm, range->slot, range->start);
+ kvm_unmap_rmapp(kvm, range->slot, gfn);
}
return false;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 28a80d240b76..bc0813644666 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3936,7 +3936,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
break;
}
cur = ktime_get();
- } while (single_task_running() && ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
@@ -4455,7 +4455,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
mtspr(SPRN_EBBRR, ebb_regs[1]);
mtspr(SPRN_BESCR, ebb_regs[2]);
mtspr(SPRN_TAR, user_tar);
- mtspr(SPRN_FSCR, current->thread.fscr);
}
mtspr(SPRN_VRSAVE, user_vrsave);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 7af7c70f1468..7a0f12404e0e 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -23,20 +23,9 @@
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
{
- unsigned long addr = (unsigned long) x;
- pte_t *p;
- /*
- * assume we don't have huge pages in vmalloc space...
- * So don't worry about THP collapse/split. Called
- * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
- */
- p = find_init_mm_pte(addr, NULL);
- if (!p || !pte_present(*p))
- return NULL;
- addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
- return __va(addr);
+ return __va(ppc_find_vmap_phys((unsigned long)addr));
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5e634db4809b..004f0d4e665f 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_UAMOR (SFS-88)
#define STACK_SLOT_DAWR1 (SFS-96)
#define STACK_SLOT_DAWRX1 (SFS-104)
+#define STACK_SLOT_FSCR (SFS-112)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
std r6, STACK_SLOT_DAWR0(r1)
std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_FSCR
+ std r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
mfspr r6, SPRN_DAWR1
@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
ld r7, STACK_SLOT_HFSCR(r1)
mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_FSCR(r1)
+ mtspr SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 1fd31b4b0e13..fe26f2fa0f3f 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
@@ -149,17 +150,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types & STF_BARRIER_FALLBACK)
+ // See comment in do_entry_flush_fixups() RE order of patching
+ if (types & STF_BARRIER_FALLBACK) {
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
patch_branch((struct ppc_inst *)(dest + 1),
- (unsigned long)&stf_barrier_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1),
- ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ }
}
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
@@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
+static int __do_stf_barrier_fixups(void *data)
+{
+ enum stf_barrier_type *types = data;
+
+ do_stf_entry_barrier_fixups(*types);
+ do_stf_exit_barrier_fixups(*types);
+
+ return 0;
+}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
- do_stf_entry_barrier_fixups(types);
- do_stf_exit_barrier_fixups(types);
+ /*
+ * The call to the fallback entry flush, and the fallback/sync-ori exit
+ * flush can not be safely patched in/out while other CPUs are executing
+ * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
+ * spin in the stop machine core with interrupts hard disabled.
+ */
+ stop_machine(__do_stf_barrier_fixups, &types, NULL);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
: "unknown");
}
-void do_entry_flush_fixups(enum l1d_flush_type types)
+static int __do_entry_flush_fixups(void *data)
{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
@@ -309,6 +325,31 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
if (types & L1D_FLUSH_MTTRIG)
instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ /*
+ * If we're patching in or out the fallback flush we need to be careful about the
+ * order in which we patch instructions. That's because it's possible we could
+ * take a page fault after patching one instruction, so the sequence of
+ * instructions must be safe even in a half patched state.
+ *
+ * To make that work, when patching in the fallback flush we patch in this order:
+ * - the mflr (dest)
+ * - the mtlr (dest + 2)
+ * - the branch (dest + 1)
+ *
+ * That ensures the sequence is safe to execute at any point. In contrast if we
+ * patch the mtlr last, it's possible we could return from the branch and not
+ * restore LR, leading to a crash later.
+ *
+ * When patching out the fallback flush (either with nops or another flush type),
+ * we patch in this order:
+ * - the branch (dest + 1)
+ * - the mtlr (dest + 2)
+ * - the mflr (dest)
+ *
+ * Note we are protected by stop_machine() from other CPUs executing the code in a
+ * semi-patched state.
+ */
+
start = PTRRELOC(&__start___entry_flush_fixup);
end = PTRRELOC(&__stop___entry_flush_fixup);
for (i = 0; start < end; start++, i++) {
@@ -316,15 +357,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types == L1D_FLUSH_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
- BRANCH_SET_LINK);
- else
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_branch((struct ppc_inst *)(dest + 1),
+ (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ }
}
start = PTRRELOC(&__start___scv_entry_flush_fixup);
@@ -334,15 +376,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types == L1D_FLUSH_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
- BRANCH_SET_LINK);
- else
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_branch((struct ppc_inst *)(dest + 1),
+ (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ }
}
@@ -354,6 +397,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * The call to the fallback flush can not be safely patched in/out while
+ * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+ * CPU while all other CPUs spin in the stop machine core with interrupts
+ * hard disabled.
+ */
+ stop_machine(__do_entry_flush_fixups, &types, NULL);
}
void do_rfi_flush_fixups(enum l1d_flush_type types)
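
Both do_stf_barrier_fixups() and do_entry_flush_fixups() now do the actual patching under stop_machine(), so exactly one CPU rewrites the instruction sequences while every other CPU spins with interrupts hard disabled; together with the patching order described in the long comment above, no CPU can ever execute a half-patched fallback flush. The bare shape of that pattern, stripped of the powerpc specifics, is sketched below (the real code patches via patch_instruction() rather than a plain store; this is an illustration, not kernel code):

#include <linux/stop_machine.h>
#include <linux/compiler.h>
#include <asm/cacheflush.h>

struct patch_args {
	unsigned int *site;	/* instruction slot to rewrite */
	unsigned int insn;	/* new instruction */
};

/* Runs on exactly one CPU while all others spin in the stop_machine core
 * with interrupts hard disabled, so nobody can execute the site mid-patch. */
static int __do_patch(void *data)
{
	struct patch_args *args = data;

	WRITE_ONCE(*args->site, args->insn);
	flush_icache_range((unsigned long)args->site,
			   (unsigned long)(args->site + 1));
	return 0;
}

static void patch_site_stopped(unsigned int *site, unsigned int insn)
{
	struct patch_args args = { .site = site, .insn = insn };

	/* NULL cpumask: any one CPU may do the work. */
	stop_machine(__do_patch, &args, NULL);
}
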
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 043bbeaf407c..a6b36a40897a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
+#include <asm/sparsemem.h>
#include <asm/svm.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 16d4d1b6a1ff..51622411a7cc 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2254,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
bool use_siar = regs_use_siar(regs);
unsigned long siar = mfspr(SPRN_SIAR);
- if (ppmu->flags & PPMU_P10_DD1) {
+ if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
if (siar)
return siar;
else
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 2136e42833af..8a2b8d64265b 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1); \
#define HCALL_BRANCH(LABEL)
#endif
+_GLOBAL_TOC(plpar_hcall_norets_notrace)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+ HVSC /* invoke the hypervisor */
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+ blr /* return r3 = status */
+
_GLOBAL_TOC(plpar_hcall_norets)
HMT_MEDIUM
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 1f3152ad7213..dab356e3ff87 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1829,30 +1829,28 @@ void hcall_tracepoint_unregfunc(void)
#endif
/*
- * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
+ * Keep track of hcall tracing depth and prevent recursion. Warn if any is
+ * detected because it may indicate a problem. This will not catch all
+ * problems with tracing code making hcalls, because the tracing might have
+ * been invoked from a non-hcall, so the first hcall could recurse into it
+ * without warning here, but this better than nothing.
+ *
+ * Hcalls with specific problems being traced should use the _notrace
+ * plpar_hcall variants.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
-void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
unsigned long flags;
unsigned int *depth;
- /*
- * We cannot call tracepoints inside RCU idle regions which
- * means we must not trace H_CEDE.
- */
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (WARN_ON_ONCE(*depth))
goto out;
(*depth)++;
@@ -1864,19 +1862,16 @@ out:
local_irq_restore(flags);
}
-void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
unsigned long flags;
unsigned int *depth;
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (*depth) /* Don't warn again on the way out */
goto out;
(*depth)++;
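
The lpar.c hunk drops the H_CEDE special case (hcalls that must not be traced can now use plpar_hcall_norets_notrace(), added in hvCall.S above), marks the helpers notrace, and turns the silent recursion check into a WARN_ON_ONCE so that tracing code calling back into hcalls gets flagged. The guard itself is the usual per-CPU depth counter; roughly (a sketch of the pattern, not the pseries code):

#include <linux/percpu.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <linux/bug.h>

static DEFINE_PER_CPU(unsigned int, trace_depth);

/* Returns true if the caller may emit a tracepoint.  A non-zero depth
 * means we got here from within the tracing code itself, so warn once
 * and bail out instead of recursing. */
static bool trace_enter(unsigned long *flags)
{
	unsigned int *depth;

	local_irq_save(*flags);
	depth = this_cpu_ptr(&trace_depth);
	if (WARN_ON_ONCE(*depth)) {
		local_irq_restore(*flags);
		return false;
	}
	(*depth)++;
	return true;
}

static void trace_exit(unsigned long flags)
{
	(*this_cpu_ptr(&trace_depth))--;
	local_irq_restore(flags);
}
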
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a8ad8eb76120..18ec0f9bb8d5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -34,6 +34,7 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_SUPPORTS_HUGETLBFS if MMU
+ select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
@@ -60,11 +61,11 @@ config RISCV
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
@@ -79,9 +80,9 @@ config RISCV
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_KPROBES
- select HAVE_KPROBES_ON_FTRACE
- select HAVE_KRETPROBES
+ select HAVE_KPROBES if !XIP_KERNEL
+ select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+ select HAVE_KRETPROBES if !XIP_KERNEL
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -230,11 +231,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select SWIOTLB if MMU
endchoice
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index ed963761fbd2..30676ebb16eb 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -14,6 +14,7 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
+ select RISCV_ERRATA_ALTERNATIVE
select ERRATA_SIFIVE
help
This enables support for SiFive SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 3eb9590a0775..99ecd8bcfd77 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif
@@ -38,6 +38,15 @@ else
KBUILD_LDFLAGS += -melf32lriscv
endif
+ifeq ($(CONFIG_LD_IS_LLD),y)
+ KBUILD_CFLAGS += -mno-relax
+ KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+ KBUILD_CFLAGS += -Wa,-mno-relax
+ KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index 622b12771fd3..855c1502d912 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
index 74c47fe9fc22..d90e4eb0ade8 100644
--- a/arch/riscv/boot/dts/sifive/Makefile
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 8eef82e4199f..abbb960f90a0 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -273,7 +273,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <19 20 21 22>;
+ interrupts = <19 21 22 20>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile
index bdd5fc843b8e..2fde48db0619 100644
--- a/arch/riscv/errata/sifive/Makefile
+++ b/arch/riscv/errata/sifive/Makefile
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
obj-y += errata.o
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 88c08705f64a..67406c376389 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -51,7 +51,7 @@
REG_ASM " " newlen "\n" \
".word " errata_id "\n"
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
".if " __stringify(enable) " == 1\n" \
".pushsection .alternative, \"a\"\n" \
ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
"886 :\n" \
old_c "\n" \
"887 :\n" \
- ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+ ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 400a8c8b6de7..ac9bdf4fc404 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,22 +25,22 @@
#define __atomic_release_fence() \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
}
@@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
*/
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \
@@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor, i)
*/
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
register c_type ret; \
@@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
return ret; \
} \
static __always_inline \
-c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
@@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
-c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
@@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_add_return atomic64_add_return
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_add atomic64_fetch_add
-#define atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif
#undef ATOMIC_OPS
@@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_and atomic64_fetch_and
-#define atomic64_fetch_or atomic64_fetch_or
-#define atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif
#undef ATOMIC_OPS
@@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 prev;
long rc;
@@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
: "memory");
return prev;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
/*
@@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
*/
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
-c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
+c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
return __cmpxchg(&(v->counter), o, n, size); \
}
@@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
ATOMIC_OPS()
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg_acquire
-#define atomic_xchg_release atomic_xchg_release
-#define atomic_xchg atomic_xchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#define atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#define arch_atomic_xchg arch_atomic_xchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS
#undef ATOMIC_OP
-static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{
int prev, rc;
@@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
return prev - offset;
}
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
s64 prev;
long rc;
@@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
return prev - offset;
}
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
#endif
#endif /* _ASM_RISCV_ATOMIC_H */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 262e5bbb2776..36dc962f6343 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -37,7 +37,7 @@
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -72,7 +72,7 @@
__ret; \
})
-#define xchg_acquire(ptr, x) \
+#define arch_xchg_acquire(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_acquire((ptr), \
@@ -107,7 +107,7 @@
__ret; \
})
-#define xchg_release(ptr, x) \
+#define arch_xchg_release(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_release((ptr), \
@@ -140,7 +140,7 @@
__ret; \
})
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
@@ -149,13 +149,13 @@
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
/*
@@ -199,7 +199,7 @@
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -245,7 +245,7 @@
__ret; \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -291,7 +291,7 @@
__ret; \
})
-#define cmpxchg_release(ptr, o, n) \
+#define arch_cmpxchg_release(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -337,7 +337,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -345,31 +345,31 @@
_o_, _n_, sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg_relaxed((ptr), (o), (n)) \
+ arch_cmpxchg_relaxed((ptr), (o), (n)) \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#endif /* _ASM_RISCV_CMPXCHG_H */
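
The arch_ prefix added throughout atomic.h and cmpxchg.h above is what lets riscv switch to the generic instrumented atomics: the architecture now only supplies the raw arch_atomic_*() / arch_xchg() / arch_cmpxchg() primitives, and the common headers provide the unprefixed atomic_*() wrappers that add KASAN/KCSAN instrumentation before calling down. The generated wrapper looks roughly like this fragment (paraphrased, not quoted from the generated header):

#include <linux/instrumented.h>
#include <linux/compiler.h>

static __always_inline int
atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void
atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}
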
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
index 1e954101906a..e4e291d40759 100644
--- a/arch/riscv/include/asm/kexec.h
+++ b/arch/riscv/include/asm/kexec.h
@@ -42,8 +42,8 @@ struct kimage_arch {
unsigned long fdt_addr;
};
-const extern unsigned char riscv_kexec_relocate[];
-const extern unsigned int riscv_kexec_relocate_size;
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
unsigned long jump_addr,
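
The kexec.h change is purely about declaration order: "const extern" is legal C, but GCC (with -Wextra / -Wold-style-declaration) warns that the storage-class specifier is not first, so the declarations are respelled "extern const". For example:

const extern unsigned char riscv_kexec_relocate[];	/* warns: 'extern' is not at beginning of declaration */
extern const unsigned char riscv_kexec_relocate[];	/* conventional ordering, no warning */
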
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9469f464e71a..380cd3a7e548 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -30,9 +30,8 @@
#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
-/* KASLR should leave at least 128MB for BPF after the kernel */
-#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end)
-#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index cc048143fba5..9e99e1db156b 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -14,8 +14,9 @@
#include <asm/set_memory.h> /* For set_memory_x() */
#include <linux/compiler.h> /* For unreachable() */
#include <linux/cpu.h> /* For cpu_down() */
+#include <linux/reboot.h>
-/**
+/*
* kexec_image_info - Print received image details
*/
static void
@@ -39,7 +40,7 @@ kexec_image_info(const struct kimage *image)
}
}
-/**
+/*
* machine_kexec_prepare - Initialize kexec
*
* This function is called from do_kexec_load, when the user has
@@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image)
}
-/**
+/*
* machine_kexec_cleanup - Cleanup any leftovers from
* machine_kexec_prepare
*
@@ -135,7 +136,7 @@ void machine_shutdown(void)
#endif
}
-/**
+/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
* This function is called by crash_kexec just before machine_kexec
@@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs)
pr_info("Starting crashdump kernel...\n");
}
-/**
+/*
* machine_kexec - Jump to the loaded kimage
*
* This function is called by kernel_kexec which is called by the
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 9b71a6363bda..247e33fa5bc7 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
+#ifdef CONFIG_MMU
void *alloc_insn_page(void)
{
return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
@@ -91,6 +92,7 @@ void *alloc_insn_page(void)
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
}
+#endif
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 03901d3a8b02..9a1b7a0603b2 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -231,13 +231,13 @@ static void __init init_resources(void)
/* Clean-up any unused pre-allocated resources */
mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
return;
error:
/* Better an empty resource tree than an inconsistent one */
release_child_resources(&iomem_resource);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 2b3e0cb90d78..bde85fc53357 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(0);
- sp = sp_in_global;
- pc = (unsigned long)walk_stackframe;
+ } else if (task == current) {
+ fp = (unsigned long)__builtin_frame_address(1);
+ sp = (unsigned long)__builtin_frame_address(0);
+ pc = (unsigned long)__builtin_return_address(0);
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
return true;
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
- pr_cont("%sCall Trace:\n", loglvl);
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
+ pr_cont("%sCall Trace:\n", loglvl);
dump_backtrace(NULL, task, loglvl);
}
@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *task)
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
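
The stacktrace change stops treating a NULL task as "current" and, for the current task, starts the unwind from the caller of walk_stackframe() instead of from walk_stackframe() itself, using the frame-address builtins; dump_backtrace() and arch_stack_walk() are made noinline so that skipping one frame stays meaningful. A user-space sketch of those builtins (only reliable when frame pointers are kept, e.g. with -fno-omit-frame-pointer, and GCC warns that non-zero __builtin_frame_address arguments are unsafe in general):

#include <stdio.h>

/* frame_address(1): the caller's frame pointer; frame_address(0): our own
 * frame (a usable stack-pointer bound); return_address(0): the caller's pc. */
static void __attribute__((noinline)) where_am_i(void)
{
	void *fp = __builtin_frame_address(1);
	void *sp = __builtin_frame_address(0);
	void *pc = __builtin_return_address(0);

	printf("fp=%p sp=%p pc=%p\n", fp, sp, pc);
}

int main(void)
{
	where_am_i();
	return 0;
}
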
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0721b9798595..7bc88d8aab97 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
}
}
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section __section(".xip.traps")
+#else
+#define __trap_section
+#endif
#define DO_ERROR_INFO(name, signo, code, str) \
-asmlinkage __visible void name(struct pt_regs *regs) \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_load(regs))
return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
"Oops - load address misaligned");
}
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_store(regs))
return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
return GET_INSN_LENGTH(insn);
}
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 4b29b9917f99..a3ff09c4c3f9 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -99,9 +99,22 @@ SECTIONS
}
PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(8);
+ .alternative : {
+ __alt_start = .;
+ *(.alternative)
+ __alt_end = .;
+ }
__init_end = .;
+ . = ALIGN(16);
+ .xip.traps : {
+ __xip_traps_start = .;
+ *(.xip.traps)
+ __xip_traps_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 4faf8bd157ea..4c4c92ce0bb8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
unsigned long init_data_start = (unsigned long)__init_data_begin;
unsigned long rodata_start = (unsigned long)__start_rodata;
unsigned long data_start = (unsigned long)_data;
- unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+ unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+ unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
/* rodata section is marked readonly in mark_rodata_ro */
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+ set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 9daacae93e33..d7189c8714a9 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
void __init kasan_init(void)
{
- phys_addr_t _start, _end;
+ phys_addr_t p_start, p_end;
u64 i;
/*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
/* Populate the linear mapping */
- for_each_mem_range(i, &_start, &_end) {
- void *start = (void *)__va(_start);
- void *end = (void *)__va(_end);
+ for_each_mem_range(i, &p_start, &p_end) {
+ void *start = (void *)__va(p_start);
+ void *end = (void *)__va(p_end);
if (start >= end)
break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)
/* Populate kernel, BPF, modules mapping */
kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
- kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+ kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 7c93c6573524..7138d189cc42 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -147,6 +147,4 @@ ATOMIC64_OPS(xor)
#define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v)
-#define ARCH_ATOMIC
-
#endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 2b543163d90a..76c6034428be 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -91,12 +91,16 @@ struct stack_frame {
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
-#define CALL_FMT_0 "=&d" (r2) :
-#define CALL_FMT_1 "+&d" (r2) :
-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+/*
+ * To keep this simple mark register 2-6 as being changed (volatile)
+ * by the called function, even though register 6 is saved/nonvolatile.
+ */
+#define CALL_FMT_0 "=&d" (r2)
+#define CALL_FMT_1 "+&d" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
@@ -118,7 +122,7 @@ struct stack_frame {
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
- [_stack] "R" (stack), \
+ : [_stack] "R" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_frame] "d" (frame), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 12de7a9c85b3..e84f495e7eb2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -418,6 +418,7 @@ ENTRY(\name)
xgr %r6,%r6
xgr %r7,%r7
xgr %r10,%r10
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
tm %r8,0x0001 # coming from user space?
@@ -651,9 +652,9 @@ ENDPROC(stack_overflow)
.Lcleanup_sie_mcck:
larl %r13,.Lsie_entry
slgr %r9,%r13
- larl %r13,.Lsie_skip
+ lghi %r13,.Lsie_skip - .Lsie_entry
clgr %r9,%r13
- jh .Lcleanup_sie_int
+ jhe .Lcleanup_sie_int
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 90163e6184f5..080e7aed181f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -512,7 +512,6 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 7e4a2aba366d..0690263df1dd 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bfcc327acc6b..26aa2614ee35 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -66,7 +66,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
{
static cpumask_t mask;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
case TOPOLOGY_MODE_HW:
while (info) {
@@ -83,10 +86,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
default:
fallthrough;
case TOPOLOGY_MODE_SINGLE:
- cpumask_copy(&mask, cpumask_of(cpu));
break;
}
cpumask_and(&mask, &mask, cpu_online_mask);
+out:
cpumask_copy(dst, &mask);
}
@@ -95,7 +98,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
static cpumask_t mask;
int i;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 813b6e93dc83..c8841f476e91 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -140,7 +140,12 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
- kvm->arch.pv.stor_var = vzalloc(vlen);
+ /*
+ * The Create Secure Configuration Ultravisor Call does not support
+ * using large pages for the virtual memory area.
+ * This is a hardware limitation.
+ */
+ kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
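
The vmalloc_no_huge() switch above exists because the Ultravisor cannot accept a huge-page backed donation area. As a rough userspace analogy, offered only as a hedged illustration of requesting base-page-backed memory and not as the kernel interface itself, MADV_NOHUGEPAGE asks that an anonymous mapping not be collapsed into transparent huge pages:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/*
 * Userspace analogy only: map 16 MiB anonymously and ask the kernel
 * not to back it with transparent huge pages.  madvise() may return
 * EINVAL on kernels built without THP support, which is reported but
 * not fatal here.
 */
int main(void)
{
	size_t len = 16u << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (madvise(p, len, MADV_NOHUGEPAGE))
		perror("madvise");
	memset(p, 0, len);	/* touch the pages */
	munmap(p, len);
	return 0;
}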
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index aace62d42288..059791fd394f 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -3,7 +3,7 @@
#define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -23,7 +23,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -45,7 +45,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int res, tmp; \
\
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index ee523bd2120f..7665de9d00d0 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -11,7 +11,7 @@
*/
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -21,7 +21,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
@@ -35,7 +35,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 1d06e4d288dc..b63dcfbfa14e 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -17,7 +17,7 @@
*/
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
\
@@ -32,7 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
@@ -50,7 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long res, temp; \
\
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 7c2a8a703b9a..528bfeda78f5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,8 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
@@ -30,8 +30,8 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#endif /* CONFIG_CPU_J2 */
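
The sh renames here, like the s390 and sparc hunks around them, move the raw implementations to arch_atomic_*() so the generic layer can interpose instrumentation before delegating. The sketch below mirrors that layering with C11 atomics; the *_demo names and the empty instrumentation hook are placeholders, not the kernel's atomic-instrumented.h machinery.

#include <stdatomic.h>
#include <stdio.h>

/* Raw architecture-level op: the analogue of arch_atomic_add(). */
static inline void arch_atomic_add_demo(int i, _Atomic int *v)
{
	atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

/* Stand-in for the real KASAN/KCSAN access checks. */
static inline void instrument_atomic_rw_demo(void *addr, unsigned int size)
{
	(void)addr;
	(void)size;
}

/* Instrumented wrapper: the analogue of the generic atomic_add(). */
static inline void atomic_add_demo(int i, _Atomic int *v)
{
	instrument_atomic_rw_demo(v, sizeof(*v));
	arch_atomic_add_demo(i, v);
}

int main(void)
{
	_Atomic int counter = 0;

	atomic_add_demo(5, &counter);
	printf("%d\n", atomic_load(&counter));	/* 5 */
	return 0;
}

The design point is that callers keep using the unprefixed name while every access is funneled through the wrapper that the sanitizers can see.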
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index e9501d85c278..0ed9b3f4a577 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -45,7 +45,7 @@ extern void __xchg_called_with_bad_pointer(void);
__xchg__res; \
})
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
@@ -63,7 +63,7 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index f47a0dc55445..0b91499ebdcf 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index f5beecdac693..e76b22157099 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
BUILD_TRAP_HANDLER(nmi)
{
- unsigned int cpu = smp_processor_id();
TRAP_HANDLER_DECL;
arch_ftrace_nmi_enter();
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index efad5532f169..d775daa83d12 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,30 +18,30 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
-int atomic_add_return(int, atomic_t *);
-int atomic_fetch_add(int, atomic_t *);
-int atomic_fetch_and(int, atomic_t *);
-int atomic_fetch_or(int, atomic_t *);
-int atomic_fetch_xor(int, atomic_t *);
-int atomic_cmpxchg(atomic_t *, int, int);
-int atomic_xchg(atomic_t *, int);
-int atomic_fetch_add_unless(atomic_t *, int, int);
-void atomic_set(atomic_t *, int);
+int arch_atomic_add_return(int, atomic_t *);
+int arch_atomic_fetch_add(int, atomic_t *);
+int arch_atomic_fetch_and(int, atomic_t *);
+int arch_atomic_fetch_or(int, atomic_t *);
+int arch_atomic_fetch_xor(int, atomic_t *);
+int arch_atomic_cmpxchg(atomic_t *, int, int);
+int arch_atomic_xchg(atomic_t *, int);
+int arch_atomic_fetch_add_unless(atomic_t *, int, int);
+void arch_atomic_set(atomic_t *, int);
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_add(i, v) ((void)arch_atomic_add_return( (int)(i), (v)))
+#define arch_atomic_sub(i, v) ((void)arch_atomic_add_return(-(int)(i), (v)))
-#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
-#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
-#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
+#define arch_atomic_and(i, v) ((void)arch_atomic_fetch_and((i), (v)))
+#define arch_atomic_or(i, v) ((void)arch_atomic_fetch_or((i), (v)))
+#define arch_atomic_xor(i, v) ((void)arch_atomic_fetch_xor((i), (v)))
-#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
-#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
+#define arch_atomic_sub_return(i, v) (arch_atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_fetch_sub(i, v) (arch_atomic_fetch_add (-(int)(i), (v)))
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 6b235d3d1d9d..077891686715 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,23 +14,23 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \
-void atomic_##op(int, atomic_t *); \
-void atomic64_##op(s64, atomic64_t *);
+void arch_atomic_##op(int, atomic_t *); \
+void arch_atomic64_##op(s64, atomic64_t *);
#define ATOMIC_OP_RETURN(op) \
-int atomic_##op##_return(int, atomic_t *); \
-s64 atomic64_##op##_return(s64, atomic64_t *);
+int arch_atomic_##op##_return(int, atomic_t *); \
+s64 arch_atomic64_##op##_return(s64, atomic64_t *);
#define ATOMIC_FETCH_OP(op) \
-int atomic_fetch_##op(int, atomic_t *); \
-s64 atomic64_fetch_##op(s64, atomic64_t *);
+int arch_atomic_fetch_##op(int, atomic_t *); \
+s64 arch_atomic64_fetch_##op(s64, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@@ -49,18 +49,18 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+s64 arch_atomic64_dec_if_positive(atomic64_t *v);
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index a53d744d4212..27a57a3a7597 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
@@ -55,7 +55,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -64,7 +64,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
})
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
-#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
+#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h>
@@ -72,9 +72,9 @@ u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 316faa0130ba..8c39a9981187 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -52,7 +52,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
@@ -168,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -189,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
case 8: return __cmpxchg(ptr, old, new, size);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
}
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index b9e1c0e735b7..e34cc30ef22c 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -488,7 +488,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 281fa634bb1a..8b81d0f00c97 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
#define ATOMIC_FETCH_OP(op, c_op) \
-int atomic_fetch_##op(int i, atomic_t *v) \
+int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
#define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
@@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
{
int ret;
unsigned long flags;
@@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);
-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);
-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
@@ -118,7 +118,7 @@ void atomic_set(atomic_t *v, int i)
v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 456b65a30ecf..8245d4a97301 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -19,7 +19,7 @@
*/
#define ATOMIC_OP(op) \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op); \
+EXPORT_SYMBOL(arch_atomic_##op);
#define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return); \
+EXPORT_SYMBOL(arch_atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
ATOMIC_OP(add)
ATOMIC_OP_RETURN(add)
@@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
#undef ATOMIC_OP
#define ATOMIC64_OP(op) \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op); \
+EXPORT_SYMBOL(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return); \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);
ATOMIC64_OP(add)
ATOMIC64_OP_RETURN(add)
@@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o0], %g1
brlez,pn %g1, 3f
@@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
3: retl
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index c77c5d8a7b3e..cb5e8d39cac1 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -178,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
-ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
-endif
-
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -202,7 +197,13 @@ ifdef CONFIG_RETPOLINE
endif
endif
-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
+
+ifdef CONFIG_LTO_CLANG
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
+endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6e5522aebbbd..431bf7f846c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -30,6 +30,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
KBUILD_CFLAGS := -m$(BITS) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+KBUILD_CFLAGS += -Wundef
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone
@@ -48,10 +49,10 @@ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
KBUILD_CFLAGS += $(CLANG_FLAGS)
-# sev-es.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly includes inat-table.h which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -93,7 +94,7 @@ ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-y += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
- vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
+ vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index dde042f64cca..743f13ea25c1 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -172,7 +172,7 @@ void __puthex(unsigned long value)
}
}
-#if CONFIG_X86_NEED_RELOCS
+#ifdef CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len,
unsigned long virt_addr)
{
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index e5612f035498..31139256859f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -79,7 +79,7 @@ struct mem_vector {
u64 size;
};
-#if CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_RANDOMIZE_BASE
/* kaslr.c */
void choose_random_location(unsigned long input,
unsigned long input_size,
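
The misc.c and misc.h hunks replace "#if CONFIG_..." with "#ifdef", and the Makefile hunk earlier adds -Wundef so this class of mistake warns at build time. A standalone sketch of the difference, with DEMO_OPTION as a made-up macro:

#include <stdio.h>

/*
 * Build with "cc -Wundef demo.c".  An undefined macro in "#if"
 * silently evaluates to 0 (and now warns under -Wundef), while
 * "#ifdef" tests definedness explicitly and never warns.
 */
#if DEMO_OPTION			/* warns with -Wundef when undefined */
static const char *mode = "option on";
#else
static const char *mode = "option off";
#endif

#ifdef DEMO_OPTION		/* pure definedness check */
static const int defined_flag = 1;
#else
static const int defined_flag = 0;
#endif

int main(void)
{
	printf("%s, defined=%d\n", mode, defined_flag);
	return 0;
}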
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev.c
index 82041bd380e5..670e998fe930 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -13,7 +13,7 @@
#include "misc.h"
#include <asm/pgtable_types.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
@@ -117,7 +117,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
#include "../../lib/insn.c"
/* Include code for early handlers */
-#include "../../kernel/sev-es-shared.c"
+#include "../../kernel/sev-shared.c"
static bool early_setup_sev_es(void)
{
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7b2542b13ebd..04bce95bc7e3 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -130,8 +130,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
/* User code screwed up. */
regs->ax = -EFAULT;
- instrumentation_end();
local_irq_disable();
+ instrumentation_end();
irqentry_exit_to_user_mode(regs);
return false;
}
@@ -269,15 +269,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
+ instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- instrumentation_begin();
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
+ instrumentation_end();
irqentry_exit(regs, state);
}
}
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a16a5294d55f..1886aaf19914 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -506,7 +506,7 @@ SYM_CODE_START(\asmsym)
movq %rsp, %rdi /* pt_regs pointer */
- call \cfunc
+ call kernel_\cfunc
/*
* No need to switch back to the IST stack. The current stack is either
@@ -517,7 +517,7 @@ SYM_CODE_START(\asmsym)
/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
- idtentry_body safe_stack_\cfunc, has_error_code=1
+ idtentry_body user_\cfunc, has_error_code=1
_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 28a1423ce32e..4bbc267fb36b 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -447,7 +447,7 @@
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 i386 mount_setattr sys_mount_setattr
-443 i386 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 i386 landlock_create_ruleset sys_landlock_create_ruleset
445 i386 landlock_add_rule sys_landlock_add_rule
446 i386 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index ecd551b08d05..ce18119ea0d0 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,7 +364,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index c0167d52832e..1eb45139fcc6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
if (!atomic_inc_not_zero(&pmc_refcount)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&pmc_refcount) == 0) {
- if (!reserve_pmc_hardware())
+ if (!reserve_pmc_hardware()) {
err = -EBUSY;
- else
+ } else {
reserve_ds_buffers();
+ reserve_lbr_buffers();
+ }
}
if (!err)
atomic_inc(&pmc_refcount);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e355db5da097..fca7a6e2242f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6019,7 +6019,13 @@ __init int intel_pmu_init(void)
tsx_attr = hsw_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(pmem);
- if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ /*
+ * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
+ * TSX force abort hooks are not required on these systems. Only deploy
+ * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
+ !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
x86_pmu.flags |= PMU_FL_TFA;
x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
@@ -6262,7 +6268,7 @@ __init int intel_pmu_init(void)
* Check all LBT MSR here.
* Disable LBR access if any LBR MSRs can not be accessed.
*/
- if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
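
The intel_pmu_init() hunk keys the TFA workaround off the new RTM_ALWAYS_ABORT CPUID bit. As a hedged x86-only sketch, the same bit (CPUID leaf 7, subleaf 0, EDX bit 11, matching X86_FEATURE_RTM_ALWAYS_ABORT at 18*32+11 in cpufeatures.h) can be read from userspace:

#include <cpuid.h>
#include <stdio.h>

/*
 * x86-only illustration using the compiler-provided cpuid.h helper;
 * builds with GCC or Clang.  Not the kernel's boot_cpu_has() path.
 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("RTM_ALWAYS_ABORT: %s\n",
	       (edx & (1u << 11)) ? "yes" : "no");
	return 0;
}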
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 76dbab6ac9fb..e8453de7a964 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
- struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
-
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
- kmem_cache && !cpuc->lbr_xsave &&
- (cpuc->lbr_users != cpuc->lbr_pebs_users))
- cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}
void release_lbr_buffers(void)
@@ -722,6 +716,27 @@ void release_lbr_buffers(void)
}
}
+void reserve_lbr_buffers(void)
+{
+ struct kmem_cache *kmem_cache;
+ struct cpu_hw_events *cpuc;
+ int cpu;
+
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+ kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+ if (!kmem_cache || cpuc->lbr_xsave)
+ continue;
+
+ cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+ GFP_KERNEL | __GFP_ZERO,
+ cpu_to_node(cpu));
+ }
+}
+
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 76227627e3b4..bb6eb1e5569c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1413,6 +1413,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
die_id = i;
else
die_id = topology_phys_to_logical_pkg(i);
+ if (die_id < 0)
+ die_id = -ENODEV;
map->pbus_to_dieid[bus] = die_id;
break;
}
@@ -1459,14 +1461,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
i = -1;
if (reverse) {
for (bus = 255; bus >= 0; bus--) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
}
} else {
for (bus = 0; bus <= 255; bus++) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
@@ -5258,9 +5260,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
.event_ctl = SNR_M2M_PCI_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
.ops = &snr_m2m_uncore_pci_ops,
- .format_group = &skx_uncore_format_group,
+ .format_group = &snr_m2m_uncore_format_group,
};
static struct attribute *icx_upi_uncore_formats_attr[] = {
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index d6003e08b055..2bf1c7ea2758 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1245,6 +1245,8 @@ void reserve_ds_buffers(void);
void release_lbr_buffers(void);
+void reserve_lbr_buffers(void);
+
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
@@ -1394,6 +1396,10 @@ static inline void release_lbr_buffers(void)
{
}
+static inline void reserve_lbr_buffers(void)
+{
+}
+
static inline int intel_pmu_init(void)
{
return 0;
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 84a1042c3b01..85feafacc445 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -764,13 +764,14 @@ static struct rapl_model model_spr = {
.rapl_msrs = intel_rapl_spr_msrs,
};
-static struct rapl_model model_amd_fam17h = {
+static struct rapl_model model_amd_hygon = {
.events = BIT(PERF_RAPL_PKG),
.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
.rapl_msrs = amd_rapl_msrs,
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
+ X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &model_snb),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &model_snbep),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &model_snb),
@@ -803,9 +804,6 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
- X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(AMD, 0x19, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 412b51e059c8..48067af94678 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
extern void lapic_assign_system_vectors(void);
extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
extern void lapic_online(void);
extern void lapic_offline(void);
extern bool apic_needs_pit(void);
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index f732741ad7c7..5e754e895767 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -269,6 +269,4 @@ static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#define ARCH_ATOMIC
-
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index ac37830ae941..d0ce5cfd3ac1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -108,7 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-/* free ( 3*32+29) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -378,6 +378,7 @@
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index ed33a14188f6..23bef08a8388 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
*/
#define PASID_DISABLED 0
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
static inline void update_pasid(void) { }
-#endif
+
#endif /* _ASM_X86_FPU_API_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8d33ad80704f..16bf4d4a8159 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}
+static inline void fxsave(struct fxregs_state *fx)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+ else
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
@@ -272,28 +280,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
- u64 mask = xfeatures_mask_all;
- u32 lmask = mask;
- u32 hmask = mask >> 32;
- int err;
-
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
- else
- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
- /* We should never fault when copying to a kernel buffer: */
- WARN_ON_FPU(err);
-}
-
-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
u64 mask = -1;
@@ -578,19 +564,19 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
* PKRU state is switched eagerly because it needs to be valid before we
* return to userland e.g. for a copy_to_user() operation.
*/
- if (current->mm) {
+ if (!(current->flags & PF_KTHREAD)) {
+ /*
+ * If the PKRU bit in xsave.header.xfeatures is not set,
+ * then the PKRU component was in init state, which means
+ * XRSTOR will set PKRU to 0. If the bit is not set then
+ * get_xsave_addr() will return NULL because the PKRU value
+ * in memory is not valid. This means pkru_val has to be
+ * set to 0 and not to init_pkru_value.
+ */
pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
- if (pk)
- pkru_val = pk->pkru;
+ pkru_val = pk ? pk->pkru : 0;
}
__write_pkru(pkru_val);
-
- /*
- * Expensive PASID MSR write will be avoided in update_pasid() because
- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
- * unless it's different from mm->pasid to reduce overhead.
- */
- update_pasid();
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 73d45b0dfff2..cd9f3e304944 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -312,8 +312,8 @@ static __always_inline void __##func(struct pt_regs *regs)
*/
#define DECLARE_IDTENTRY_VC(vector, func) \
DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
- __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
- __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
+ __visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
+ __visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
/**
* DEFINE_IDTENTRY_IST - Emit code for IST entry points
@@ -355,33 +355,24 @@ static __always_inline void __##func(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
/**
- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
- which runs on a safe stack.
+ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
+ when raised from kernel mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
+#define DEFINE_IDTENTRY_VC_KERNEL(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
/**
- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
- which runs on the VC fall-back stack
+ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
+ when raised from user mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_IST(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
-
-/**
- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
- * @func: Function name of the entry point
- *
- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
- */
-#define DEFINE_IDTENTRY_VC(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+#define DEFINE_IDTENTRY_VC_USER(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
#else /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 955b06d6325a..27158436f322 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -102,7 +102,8 @@
#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
-#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Willow Cove */
+
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 610a05374c02..0449b125d27f 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -4,8 +4,6 @@
#define HAVE_JUMP_LABEL_BATCH
-#define JUMP_LABEL_NOP_SIZE 5
-
#include <asm/asm.h>
#include <asm/nops.h>
@@ -14,15 +12,35 @@
#include <linux/stringify.h>
#include <linux/types.h>
+#define JUMP_TABLE_ENTRY \
+ ".pushsection __jump_table, \"aw\" \n\t" \
+ _ASM_ALIGN "\n\t" \
+ ".long 1b - . \n\t" \
+ ".long %l[l_yes] - . \n\t" \
+ _ASM_PTR "%c0 + %c1 - .\n\t" \
+ ".popsection \n\t"
+
+#ifdef CONFIG_STACK_VALIDATION
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:"
+ "jmp %l[l_yes] # objtool NOPs this \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (2 | branch) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#else
+
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -30,16 +48,13 @@ l_yes:
return true;
}
+#endif /* STACK_VALIDATION */
+
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
- ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
- "2:\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ "jmp %l[l_yes]\n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -47,41 +62,7 @@ l_yes:
return true;
}
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_JUMP_IF_TRUE target, key, def
-.Lstatic_jump_\@:
- .if \def
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .else
- .byte BYTES_NOP5
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key - .
- .popsection
-.endm
-
-.macro STATIC_JUMP_IF_FALSE target, key, def
-.Lstatic_jump_\@:
- .if \def
- .byte BYTES_NOP5
- .else
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key + 1 - .
- .popsection
-.endm
+extern int arch_jump_entry_size(struct jump_entry *entry);
#endif /* __ASSEMBLY__ */
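
The jump_label.h rework builds both variants around a shared JUMP_TABLE_ENTRY and, when objtool is available, starts from a real jmp that gets NOPed out at build time. A stripped-down sketch of the underlying asm-goto control flow, with the jump table and runtime patching omitted and the branch_demo name invented for illustration:

#include <stdio.h>

/*
 * x86 sketch of the asm-goto pattern, assuming GCC or a Clang new
 * enough to support "asm goto".  The jump is taken unconditionally
 * here; in the kernel this site is what gets rewritten between a NOP
 * and a jump when the static key changes state.
 */
static inline int branch_demo(void)
{
	asm goto("jmp %l[l_yes]" : : : : l_yes);
	return 0;	/* the "branch not taken" side */
l_yes:
	return 1;	/* the "branch taken" side */
}

int main(void)
{
	printf("taken=%d\n", branch_demo());
	return 0;
}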
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 323641097f63..e7bef91cee04 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -99,6 +99,7 @@ KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(start_assignment)
KVM_X86_OP_NULL(apicv_post_state_restore)
KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
KVM_X86_OP_NULL(set_hv_timer)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55efbacfc244..9c7ced0e3171 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1352,6 +1352,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+ void (*start_assignment)(struct kvm *kvm);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ddfb3cad8dff..0607ec4f5091 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -305,7 +305,7 @@ extern void apei_mce_report_mem_error(int corrected,
/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
SMCA_LS = 0, /* Load Store */
- SMCA_LS_V2, /* Load Store */
+ SMCA_LS_V2,
SMCA_IF, /* Instruction Fetch */
SMCA_L2_CACHE, /* L2 Cache */
SMCA_DE, /* Decoder Unit */
@@ -314,17 +314,22 @@ enum smca_bank_types {
SMCA_FP, /* Floating Point */
SMCA_L3_CACHE, /* L3 Cache */
SMCA_CS, /* Coherent Slave */
- SMCA_CS_V2, /* Coherent Slave */
+ SMCA_CS_V2,
SMCA_PIE, /* Power, Interrupts, etc. */
SMCA_UMC, /* Unified Memory Controller */
+ SMCA_UMC_V2,
SMCA_PB, /* Parameter Block */
SMCA_PSP, /* Platform Security Processor */
- SMCA_PSP_V2, /* Platform Security Processor */
+ SMCA_PSP_V2,
SMCA_SMU, /* System Management Unit */
- SMCA_SMU_V2, /* System Management Unit */
+ SMCA_SMU_V2,
SMCA_MP5, /* Microprocessor 5 Unit */
SMCA_NBIO, /* Northbridge IO Unit */
SMCA_PCIE, /* PCI Express Unit */
+ SMCA_PCIE_V2,
+ SMCA_XGMI_PCS, /* xGMI PCS Unit */
+ SMCA_XGMI_PHY, /* xGMI PHY Unit */
+ SMCA_WAFL_PHY, /* WAFL PHY Unit */
N_SMCA_BANK_TYPES
};
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 742d89a00721..a7c413432b33 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -537,9 +537,9 @@
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG 0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
@@ -772,6 +772,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index ca840fec7776..4bde0dc66100 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -75,7 +75,7 @@ void copy_page(void *to, void *from);
*
* With page table isolation enabled, we map the LDT in ... [stay tuned]
*/
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
{
unsigned long ret;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 154321d29050..556b2b17c3e2 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -787,8 +787,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
+extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
+static inline u32 amd_get_highest_perf(void) { return 0; }
#endif
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
new file mode 100644
index 000000000000..2cef6c5a52c2
--- /dev/null
+++ b/arch/x86/include/asm/sev-common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD SEV header common between the guest and the hypervisor.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __ASM_X86_SEV_COMMON_H
+#define __ASM_X86_SEV_COMMON_H
+
+#define GHCB_MSR_INFO_POS 0
+#define GHCB_DATA_LOW 12
+#define GHCB_MSR_INFO_MASK (BIT_ULL(GHCB_DATA_LOW) - 1)
+
+#define GHCB_DATA(v) \
+ (((unsigned long)(v) & ~GHCB_MSR_INFO_MASK) >> GHCB_DATA_LOW)
+
+/* SEV Information Request/Response */
+#define GHCB_MSR_SEV_INFO_RESP 0x001
+#define GHCB_MSR_SEV_INFO_REQ 0x002
+#define GHCB_MSR_VER_MAX_POS 48
+#define GHCB_MSR_VER_MAX_MASK 0xffff
+#define GHCB_MSR_VER_MIN_POS 32
+#define GHCB_MSR_VER_MIN_MASK 0xffff
+#define GHCB_MSR_CBIT_POS 24
+#define GHCB_MSR_CBIT_MASK 0xff
+#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
+ ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
+ (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
+ (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
+ GHCB_MSR_SEV_INFO_RESP)
+#define GHCB_MSR_INFO(v) ((v) & 0xfffUL)
+#define GHCB_MSR_PROTO_MAX(v) (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
+#define GHCB_MSR_PROTO_MIN(v) (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
+
+/* CPUID Request/Response */
+#define GHCB_MSR_CPUID_REQ 0x004
+#define GHCB_MSR_CPUID_RESP 0x005
+#define GHCB_MSR_CPUID_FUNC_POS 32
+#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
+#define GHCB_MSR_CPUID_VALUE_POS 32
+#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
+#define GHCB_MSR_CPUID_REG_POS 30
+#define GHCB_MSR_CPUID_REG_MASK 0x3
+#define GHCB_CPUID_REQ_EAX 0
+#define GHCB_CPUID_REQ_EBX 1
+#define GHCB_CPUID_REQ_ECX 2
+#define GHCB_CPUID_REQ_EDX 3
+#define GHCB_CPUID_REQ(fn, reg) \
+ (GHCB_MSR_CPUID_REQ | \
+ (((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
+ (((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
+
+/* AP Reset Hold */
+#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
+#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
+
+/* GHCB Hypervisor Feature Request/Response */
+#define GHCB_MSR_HV_FT_REQ 0x080
+#define GHCB_MSR_HV_FT_RESP 0x081
+
+#define GHCB_MSR_TERM_REQ 0x100
+#define GHCB_MSR_TERM_REASON_SET_POS 12
+#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
+#define GHCB_MSR_TERM_REASON_POS 16
+#define GHCB_MSR_TERM_REASON_MASK 0xff
+#define GHCB_SEV_TERM_REASON(reason_set, reason_val) \
+ (((((u64)reason_set) & GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
+ ((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
+
+#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
+#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
+
+#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+
+#endif
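
The new sev-common.h defines the GHCB MSR protocol as shifted bit fields. The sketch below composes and decodes a CPUID request with that layout; the DEMO_* constants copy the values of GHCB_MSR_CPUID_REQ, GHCB_MSR_CPUID_REG_POS, GHCB_MSR_CPUID_FUNC_POS and GHCB_MSR_INFO_MASK from the hunk, and the program is an illustration rather than guest code.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MSR_CPUID_REQ	0x004ULL
#define DEMO_MSR_INFO_MASK	((1ULL << 12) - 1)	/* low 12 bits */
#define DEMO_CPUID_REG_POS	30			/* 0 = EAX */
#define DEMO_CPUID_FUNC_POS	32

int main(void)
{
	/* Request CPUID leaf 0x8000001f, register EAX. */
	uint64_t req = DEMO_MSR_CPUID_REQ |
		       ((uint64_t)0 << DEMO_CPUID_REG_POS) |
		       ((uint64_t)0x8000001f << DEMO_CPUID_FUNC_POS);

	printf("request   = 0x%016llx\n", (unsigned long long)req);
	printf("info code = 0x%03llx\n",
	       (unsigned long long)(req & DEMO_MSR_INFO_MASK));
	printf("function  = 0x%llx\n",
	       (unsigned long long)(req >> DEMO_CPUID_FUNC_POS));
	return 0;
}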
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev.h
index cf1d957c7091..fa5cd05d3b5b 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev.h
@@ -10,34 +10,12 @@
#include <linux/types.h>
#include <asm/insn.h>
+#include <asm/sev-common.h>
-#define GHCB_SEV_INFO 0x001UL
-#define GHCB_SEV_INFO_REQ 0x002UL
-#define GHCB_INFO(v) ((v) & 0xfffUL)
-#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL)
-#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL)
-#define GHCB_PROTO_OUR 0x0001UL
-#define GHCB_SEV_CPUID_REQ 0x004UL
-#define GHCB_CPUID_REQ_EAX 0
-#define GHCB_CPUID_REQ_EBX 1
-#define GHCB_CPUID_REQ_ECX 2
-#define GHCB_CPUID_REQ_EDX 3
-#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
- (((unsigned long)reg & 3) << 30) | \
- (((unsigned long)fn) << 32))
+#define GHCB_PROTO_OUR 0x0001UL
+#define GHCB_PROTOCOL_MAX 1ULL
+#define GHCB_DEFAULT_USAGE 0ULL
-#define GHCB_PROTOCOL_MAX 0x0001UL
-#define GHCB_DEFAULT_USAGE 0x0000UL
-
-#define GHCB_SEV_CPUID_RESP 0x005UL
-#define GHCB_SEV_TERMINATE 0x100UL
-#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \
- (((((u64)reason_set) & 0x7) << 12) | \
- ((((u64)reason_val) & 0xff) << 16))
-#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
-#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
-
-#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff)
#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
enum es_result {
diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
index ddbdefd5b94f..91a7b6687c3b 100644
--- a/arch/x86/include/asm/thermal.h
+++ b/arch/x86/include/asm/thermal.h
@@ -3,11 +3,13 @@
#define _ASM_X86_THERMAL_H
#ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
void intel_init_thermal(struct cpuinfo_x86 *c);
bool x86_thermal_enabled(void);
void intel_thermal_interrupt(void);
#else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void) { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
#endif
#endif /* _ASM_X86_THERMAL_H */
diff --git a/arch/x86/include/asm/vdso/clocksource.h b/arch/x86/include/asm/vdso/clocksource.h
index 119ac8612d89..136e5e57cfe1 100644
--- a/arch/x86/include/asm/vdso/clocksource.h
+++ b/arch/x86/include/asm/vdso/clocksource.h
@@ -7,4 +7,6 @@
VDSO_CLOCKMODE_PVCLOCK, \
VDSO_CLOCKMODE_HVCLOCK
+#define HAVE_VDSO_CLOCKMODE_HVCLOCK
+
#endif /* __ASM_VDSO_CLOCKSOURCE_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0704c2a94272..0f66682ac02a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
-CFLAGS_REMOVE_sev-es.o = -pg
+CFLAGS_REMOVE_sev.o = -pg
endif
KASAN_SANITIZE_head$(BITS).o := n
@@ -28,7 +28,7 @@ KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
-KASAN_SANITIZE_sev-es.o := n
+KASAN_SANITIZE_sev.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
@@ -148,7 +148,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 49ae4e1ac9cd..7de599eba7f0 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -197,7 +197,8 @@ static int __init ffh_cstate_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_INTEL &&
- c->x86_vendor != X86_VENDOR_AMD)
+ c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -1;
cpu_cstate_entry = alloc_percpu(struct cstate_entry);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6974b5174495..6fe5b44fcbc9 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -183,41 +183,69 @@ done:
}
/*
+ * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
+ */
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
+{
+ unsigned long flags;
+ int i = off, nnops;
+
+ while (i < instrlen) {
+ if (instr[i] != 0x90)
+ break;
+
+ i++;
+ }
+
+ nnops = i - off;
+
+ if (nnops <= 1)
+ return nnops;
+
+ local_irq_save(flags);
+ add_nops(instr + off, nnops);
+ local_irq_restore(flags);
+
+ DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+ return nnops;
+}
+
+/*
* "noinline" to cause control flow change and thus invalidate I$ and
* cause refetch after modification.
*/
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
- unsigned long flags;
struct insn insn;
- int nop, i = 0;
+ int i = 0;
/*
- * Jump over the non-NOP insns, the remaining bytes must be single-byte
- * NOPs, optimize them.
+ * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+ * ones.
*/
for (;;) {
if (insn_decode_kernel(&insn, &instr[i]))
return;
+ /*
+ * See if this and any potentially following NOPs can be
+ * optimized.
+ */
if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
- break;
-
- if ((i += insn.length) >= a->instrlen)
- return;
- }
+ i += optimize_nops_range(instr, a->instrlen, i);
+ else
+ i += insn.length;
- for (nop = i; i < a->instrlen; i++) {
- if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
+ if (i >= a->instrlen)
return;
}
-
- local_irq_save(flags);
- add_nops(instr + nop, i - nop);
- local_irq_restore(flags);
-
- DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
- instr, nop, a->instrlen);
}
/*
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 09083094eb57..23dda362dc0f 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -57,6 +58,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{}
};
@@ -72,6 +74,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4a39fb429f15..d262811ce14b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
end_local_APIC_setup();
irq_remap_enable_fault_handling();
setup_IO_APIC();
+ lapic_update_legacy_vectors();
}
#ifdef CONFIG_UP_LATE_INIT
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6dbdc7c22bb7..fb67ed5e7e6a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -738,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
+void __init lapic_update_legacy_vectors(void)
+{
+ unsigned int i;
+
+ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+ return;
+
+ /*
+ * If the IO/APIC is disabled via config, kernel command line or
+ * lack of enumeration then all legacy interrupts are routed
+ * through the PIC. Make sure that they are marked as legacy
+ * vectors. PIC_CASCADE_IR has already been marked in
+ * lapic_assign_system_vectors().
+ */
+ for (i = 0; i < nr_legacy_irqs(); i++) {
+ if (i != PIC_CASCADE_IR)
+ lapic_assign_legacy_vector(i, true);
+ }
+}
+
void __init lapic_assign_system_vectors(void)
{
unsigned int i, vector = 0;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 2d11384dc9ab..b7c003013d41 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -593,8 +593,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
*/
if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
/* Check if memory encryption is enabled */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
goto clear_all;
/*
@@ -646,6 +646,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
@@ -1165,3 +1169,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
break;
}
}
+
+u32 amd_get_highest_perf(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
+ (c->x86_model >= 0x70 && c->x86_model < 0x80)))
+ return 166;
+
+ if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
+ (c->x86_model >= 0x40 && c->x86_model < 0x70)))
+ return 166;
+
+ return 255;
+}
+EXPORT_SYMBOL_GPL(amd_get_highest_perf);
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 67944128876d..95521302630d 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -48,6 +48,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
enum tsx_ctrl_states {
TSX_CTRL_ENABLE,
TSX_CTRL_DISABLE,
+ TSX_CTRL_RTM_ALWAYS_ABORT,
TSX_CTRL_NOT_SUPPORTED,
};
@@ -56,6 +57,7 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
extern void tsx_enable(void);
extern void tsx_disable(void);
+extern void tsx_clear_cpuid(void);
#else
static inline void tsx_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 0bd6c74e3ba1..6d50136f7ab9 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -260,6 +260,10 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8adffc17fa8b..861e919eba9a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -717,8 +717,10 @@ static void init_intel(struct cpuinfo_x86 *c)
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
tsx_enable();
- if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
tsx_disable();
+ else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+ tsx_clear_cpuid();
split_lock_init();
bus_lock_init();
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index e486f96b3cb3..08831acc1d03 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -77,27 +77,29 @@ struct smca_bank_name {
};
static struct smca_bank_name smca_names[] = {
- [SMCA_LS] = { "load_store", "Load Store Unit" },
- [SMCA_LS_V2] = { "load_store", "Load Store Unit" },
- [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
- [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
- [SMCA_DE] = { "decode_unit", "Decode Unit" },
- [SMCA_RESERVED] = { "reserved", "Reserved" },
- [SMCA_EX] = { "execution_unit", "Execution Unit" },
- [SMCA_FP] = { "floating_point", "Floating Point Unit" },
- [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
- [SMCA_CS] = { "coherent_slave", "Coherent Slave" },
- [SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
- [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
- [SMCA_UMC] = { "umc", "Unified Memory Controller" },
- [SMCA_PB] = { "param_block", "Parameter Block" },
- [SMCA_PSP] = { "psp", "Platform Security Processor" },
- [SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
- [SMCA_SMU] = { "smu", "System Management Unit" },
- [SMCA_SMU_V2] = { "smu", "System Management Unit" },
- [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
- [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
- [SMCA_PCIE] = { "pcie", "PCI Express Unit" },
+ [SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" },
+ [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
+ [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
+ [SMCA_DE] = { "decode_unit", "Decode Unit" },
+ [SMCA_RESERVED] = { "reserved", "Reserved" },
+ [SMCA_EX] = { "execution_unit", "Execution Unit" },
+ [SMCA_FP] = { "floating_point", "Floating Point Unit" },
+ [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
+ [SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
+ [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
+
+ /* UMC v2 is separate because both of them can exist in a single system. */
+ [SMCA_UMC] = { "umc", "Unified Memory Controller" },
+ [SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" },
+ [SMCA_PB] = { "param_block", "Parameter Block" },
+ [SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
+ [SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" },
+ [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
+ [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
+ [SMCA_PCIE ... SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" },
+ [SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" },
+ [SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" },
+ [SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" },
};
static const char *smca_get_name(enum smca_bank_types t)
@@ -155,6 +157,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* Unified Memory Controller MCA type */
{ SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
+ { SMCA_UMC_V2, HWID_MCATYPE(0x96, 0x1) },
/* Parameter Block MCA type */
{ SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
@@ -175,6 +178,16 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* PCI Express Unit MCA type */
{ SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
+ { SMCA_PCIE_V2, HWID_MCATYPE(0x46, 0x1) },
+
+ /* xGMI PCS MCA type */
+ { SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) },
+
+ /* xGMI PHY MCA type */
+ { SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
+
+ /* WAFL PHY MCA type */
+ { SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index b58b85380ddb..0e3ae64d3b76 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -36,7 +36,8 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
mce_setup(&m);
m.bank = -1;
/* Fake a memory read error with unknown channel */
- m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
+ m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;
if (severity >= GHES_SEV_RECOVERABLE)
m.status |= MCI_STATUS_UC;
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 0c3b372318b7..b5f43049fa5f 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -836,7 +836,7 @@ int __init amd_special_default_mtrr(void)
if (boot_cpu_data.x86 < 0xf)
return 0;
/* In case some hypervisor doesn't pass SYSCFG through: */
- if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
+ if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
return 0;
/*
* Memory between 4GB and top of mem is forced WB by this magic bit.
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b90f3f437765..558108296f3c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -53,13 +53,13 @@ static inline void k8_check_syscfg_dram_mod_en(void)
(boot_cpu_data.x86 >= 0x0f)))
return;
- rdmsr(MSR_K8_SYSCFG, lo, hi);
+ rdmsr(MSR_AMD64_SYSCFG, lo, hi);
if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
" not cleared by BIOS, clearing this bit\n",
smp_processor_id());
lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
- mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
+ mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
}
}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 3ef5868ac588..7aecb2fc3186 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BPU_PERFCTR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BSU_ESCR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_EVENTSEL0;
diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
index 6ad165a5c0cc..64511c4a5200 100644
--- a/arch/x86/kernel/cpu/sgx/virt.c
+++ b/arch/x86/kernel/cpu/sgx/virt.c
@@ -212,6 +212,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
list_splice_tail(&secs_pages, &zombie_secs_pages);
mutex_unlock(&zombie_secs_pages_lock);
+ xa_destroy(&vepc->page_array);
kfree(vepc);
return 0;
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index e2ad30e474f8..9c7a5f049292 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -2,7 +2,7 @@
/*
* Intel Transactional Synchronization Extensions (TSX) control.
*
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*
* Author:
* Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
@@ -84,13 +84,46 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
return TSX_CTRL_ENABLE;
}
+void tsx_clear_cpuid(void)
+{
+ u64 msr;
+
+ /*
+ * MSR_TFA_TSX_CPUID_CLEAR bit is only present when both CPUID
+ * bits RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are present.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ msr |= MSR_TFA_TSX_CPUID_CLEAR;
+ wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+ }
+}
+
void __init tsx_init(void)
{
char arg[5] = {};
int ret;
- if (!tsx_ctrl_is_supported())
+ /*
+ * Hardware will always abort a TSX transaction if both CPUID bits
+ * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
+ * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
+ * here.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
+ tsx_clear_cpuid();
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ setup_clear_cpu_cap(X86_FEATURE_HLE);
return;
+ }
+
+ if (!tsx_ctrl_is_supported()) {
+ tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
+ return;
+ }
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a4ec65317a7f..b7b92cdf3add 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -221,28 +221,18 @@ sanitize_restored_user_xstate(union fpregs_state *state,
if (use_xsave()) {
/*
- * Note: we don't need to zero the reserved bits in the
- * xstate_header here because we either didn't copy them at all,
- * or we checked earlier that they aren't set.
+ * Clear all feature bits which are not set in
+ * user_xfeatures and clear all extended features
+ * for fx_only mode.
*/
+ u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
/*
- * 'user_xfeatures' might have bits clear which are
- * set in header->xfeatures. This represents features that
- * were in init state prior to a signal delivery, and need
- * to be reset back to the init state. Clear any user
- * feature bits which are set in the kernel buffer to get
- * them back to the init state.
- *
- * Supervisor state is unchanged by input from userspace.
- * Ensure supervisor state bits stay set and supervisor
- * state is not modified.
+ * Supervisor state has to be preserved. The sigframe
+ * restore can only modify user features, i.e. @mask
+ * cannot contain them.
*/
- if (fx_only)
- header->xfeatures = XFEATURE_MASK_FPSSE;
- else
- header->xfeatures &= user_xfeatures |
- xfeatures_mask_supervisor();
+ header->xfeatures &= mask | xfeatures_mask_supervisor();
}
if (use_fxsr()) {
@@ -307,13 +297,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
return 0;
}
- if (!access_ok(buf, size))
- return -EACCES;
+ if (!access_ok(buf, size)) {
+ ret = -EACCES;
+ goto out;
+ }
- if (!static_cpu_has(X86_FEATURE_FPU))
- return fpregs_soft_set(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- NULL, buf) != 0;
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
+ ret = fpregs_soft_set(current, NULL, 0,
+ sizeof(struct user_i387_ia32_struct),
+ NULL, buf);
+ goto out;
+ }
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
@@ -369,6 +363,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_unlock();
return 0;
}
+
+ /*
+ * The above did an FPU restore operation, restricted to
+ * the user portion of the registers, and failed, but the
+ * microcode might have modified the FPU registers
+ * nevertheless.
+ *
+ * If the FPU registers do not belong to current, then
+ * invalidate the FPU register state otherwise the task might
+ * preempt current and return to user space with corrupted
+ * FPU registers.
+ *
+ * In case current owns the FPU registers then no further
+ * action is required. The fixup below will handle it
+ * correctly.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __cpu_invalidate_fpregs_state();
+
fpregs_unlock();
} else {
/*
@@ -377,7 +390,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/
ret = __copy_from_user(&env, buf, sizeof(env));
if (ret)
- goto err_out;
+ goto out;
envp = &env;
}
@@ -405,16 +418,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (use_xsave() && !fx_only) {
u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
- if (using_compacted_format()) {
- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
- } else {
- ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
- if (!ret && state_size > offsetof(struct xregs_state, header))
- ret = validate_user_xstate_header(&fpu->state.xsave.header);
- }
+ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
if (ret)
- goto err_out;
+ goto out;
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
fx_only);
@@ -434,7 +440,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
if (ret) {
ret = -EFAULT;
- goto err_out;
+ goto out;
}
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
@@ -452,7 +458,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} else {
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
if (ret)
- goto err_out;
+ goto out;
fpregs_lock();
ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
@@ -463,7 +469,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_deactivate(fpu);
fpregs_unlock();
-err_out:
+out:
if (ret)
fpu__clear_user_states(fpu);
return ret;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index a85c64000218..1cadb2faf740 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -441,12 +441,35 @@ static void __init print_xstate_offset_size(void)
}
/*
+ * All supported features have either init state all zeros or are
+ * handled in setup_init_fpu() individually. This is an explicit
+ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
+ * newly added supported features at build time and make people
+ * actually look at the init state for the new feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+ (XFEATURE_MASK_FP | \
+ XFEATURE_MASK_SSE | \
+ XFEATURE_MASK_YMM | \
+ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR | \
+ XFEATURE_MASK_PASID)
+
+/*
* setup the xstate image representing the init state
*/
static void __init setup_init_fpu_buf(void)
{
static int on_boot_cpu __initdata = 1;
+ BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
+ XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
+ XFEATURES_INIT_FPSTATE_HANDLED);
+
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
@@ -466,10 +489,22 @@ static void __init setup_init_fpu_buf(void)
copy_kernel_to_xregs_booting(&init_fpstate.xsave);
/*
- * Dump the init state again. This is to identify the init state
- * of any feature which is not represented by all zero's.
+ * All components are now in init state. Read the state back so
+ * that init_fpstate contains all non-zero init state. This only
+ * works with XSAVE, but not with XSAVEOPT and XSAVES because
+ * those use the init optimization which skips writing data for
+ * components in init state.
+ *
+ * XSAVE could be used, but that would require to reshuffle the
+ * data when XSAVES is available because XSAVES uses xstate
+ * compaction. But doing so is a pointless exercise because most
+ * components have an all zeros init state except for the legacy
+ * ones (FP and SSE). Those can be saved with FXSAVE into the
+ * legacy area. Adding new features requires ensuring that the init
+ * state is all zeroes, or, if not, adding the necessary handling
+ * here.
*/
- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+ fxsave(&init_fpstate.fxsave);
}
static int xfeature_uncompacted_offset(int xfeature_nr)
@@ -1402,60 +1437,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
- u64 pasid_state;
- u32 pasid;
-
- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
- return;
-
- if (!current->mm)
- return;
-
- pasid = READ_ONCE(current->mm->pasid);
- /* Set the valid bit in the PASID MSR/state only for valid pasid. */
- pasid_state = pasid == PASID_DISABLED ?
- pasid : pasid | MSR_IA32_PASID_VALID;
-
- /*
- * No need to hold fregs_lock() since the task's fpstate won't
- * be changed by others (e.g. ptrace) while the task is being
- * switched to or is in IPI.
- */
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- /* The MSR is active and can be directly updated. */
- wrmsrl(MSR_IA32_PASID, pasid_state);
- } else {
- struct fpu *fpu = &current->thread.fpu;
- struct ia32_pasid_state *ppasid_state;
- struct xregs_state *xsave;
-
- /*
- * The CPU's xstate registers are not currently active. Just
- * update the PASID state in the memory buffer here. The
- * PASID MSR will be loaded when returning to user mode.
- */
- xsave = &fpu->state.xsave;
- xsave->header.xfeatures |= XFEATURE_MASK_PASID;
- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
- /*
- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
- * won't be NULL and no need to check its value.
- *
- * Only update the task's PASID state when it's different
- * from the mm's pasid.
- */
- if (ppasid_state->pasid != pasid_state) {
- /*
- * Invalid fpregs so that state restoring will pick up
- * the PASID state.
- */
- __fpu_invalidate_fpregs_state(fpu);
- ppasid_state->pasid = pasid_state;
- }
- }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 18be44163a50..de01903c3735 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -39,7 +39,7 @@
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
/*
* Manage page tables very early on.
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 6a2eb62c85e6..674906fad43b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -15,50 +15,75 @@
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
+#include <asm/insn.h>
-static void bug_at(const void *ip, int line)
+int arch_jump_entry_size(struct jump_entry *entry)
{
- /*
- * The location is not an op that we were expecting.
- * Something went wrong. Crash the box, as something could be
- * corrupting the kernel.
- */
- pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
- BUG();
+ struct insn insn = {};
+
+ insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
+ BUG_ON(insn.length != 2 && insn.length != 5);
+
+ return insn.length;
}
-static const void *
-__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
+struct jump_label_patch {
+ const void *code;
+ int size;
+};
+
+static struct jump_label_patch
+__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
- const void *expect, *code;
+ const void *expect, *code, *nop;
const void *addr, *dest;
- int line;
+ int size;
addr = (void *)jump_entry_code(entry);
dest = (void *)jump_entry_target(entry);
- code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ size = arch_jump_entry_size(entry);
+ switch (size) {
+ case JMP8_INSN_SIZE:
+ code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
- if (type == JUMP_LABEL_JMP) {
- expect = x86_nops[5]; line = __LINE__;
- } else {
- expect = code; line = __LINE__;
+ case JMP32_INSN_SIZE:
+ code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
+
+ default: BUG();
}
- if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
- bug_at(addr, line);
+ if (type == JUMP_LABEL_JMP)
+ expect = nop;
+ else
+ expect = code;
+
+ if (memcmp(addr, expect, size)) {
+ /*
+ * The location is not an op that we were expecting.
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
+ pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
+ addr, addr, addr, expect, size, type);
+ BUG();
+ }
if (type == JUMP_LABEL_NOP)
- code = x86_nops[5];
+ code = nop;
- return code;
+ return (struct jump_label_patch){.code = code, .size = size};
}
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
- const void *opcode = __jump_label_set_jump_code(entry, type);
+ const struct jump_label_patch jlp = __jump_label_patch(entry, type);
/*
* As long as only a single processor is running and the code is still
@@ -72,12 +97,11 @@ static inline void __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
- text_poke_early((void *)jump_entry_code(entry), opcode,
- JUMP_LABEL_NOP_SIZE);
+ text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
return;
}
- text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -98,7 +122,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
- const void *opcode;
+ struct jump_label_patch jlp;
if (system_state == SYSTEM_BOOTING) {
/*
@@ -109,9 +133,8 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}
mutex_lock(&text_mutex);
- opcode = __jump_label_set_jump_code(entry, type);
- text_poke_queue((void *)jump_entry_code(entry),
- opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ jlp = __jump_label_patch(entry, type);
+ text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index b5cb49e57df8..c94dec6a1834 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -95,7 +95,7 @@ static void get_fam10h_pci_mmconf_base(void)
return;
/* SYS_CFG */
- address = MSR_K8_SYSCFG;
+ address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is not enabled? */
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 2ef961cf4cfc..4bce802d25fb 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,7 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 72920af0b3c0..1e720626069a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -44,6 +44,7 @@
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
+#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
@@ -637,11 +638,11 @@ static void __init trim_snb_memory(void)
* them from accessing certain memory ranges, namely anything below
* 1M and in the pages listed in bad_pages[] above.
*
- * To avoid these pages being ever accessed by SNB gfx devices
- * reserve all memory below the 1 MB mark and bad_pages that have
- * not already been reserved at boot time.
+ * To avoid these pages ever being accessed by SNB gfx devices, reserve
+ * bad_pages that have not already been reserved at boot time.
+ * All memory below the 1 MB mark is anyway reserved later during
+ * setup_arch(), so there is no need to reserve it here.
*/
- memblock_reserve(0, 1<<20);
for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
@@ -733,14 +734,14 @@ static void __init early_reserve_memory(void)
* The first 4Kb of memory is a BIOS owned area, but generally it is
* not listed as such in the E820 table.
*
- * Reserve the first memory page and typically some additional
- * memory (64KiB by default) since some BIOSes are known to corrupt
- * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+ * Reserve the first 64K of memory since some BIOSes are known to
+ * corrupt low memory. After the real mode trampoline is allocated the
+ * rest of the memory below 640k is reserved.
*
* In addition, make sure page 0 is always reserved because on
* systems with L1TF its contents can be leaked to user processes.
*/
- memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+ memblock_reserve(0, SZ_64K);
early_reserve_initrd();
@@ -751,6 +752,7 @@ static void __init early_reserve_memory(void)
reserve_ibft_region();
reserve_bios_regions();
+ trim_snb_memory();
}
/*
@@ -1081,14 +1083,20 @@ void __init setup_arch(char **cmdline_p)
(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif
- reserve_real_mode();
-
/*
- * Reserving memory causing GPU hangs on Sandy Bridge integrated
- * graphics devices should be done after we allocated memory under
- * 1M for the real mode trampoline.
+ * Find free memory for the real mode trampoline and place it
+ * there.
+ * If there is not enough free memory under 1M, on EFI-enabled
+ * systems there will be an additional attempt to reclaim the memory
+ * for the real mode trampoline at efi_free_boot_services().
+ *
+ * Unconditionally reserve the entire first 1M of RAM because
+ * BIOSes are known to corrupt low memory and several
+ * hundred kilobytes are not worth complex detection of what memory gets
+ * clobbered. Moreover, on machines with SandyBridge graphics or in
+ * setups that use crashkernel the entire 1M is reserved anyway.
*/
- trim_snb_memory();
+ reserve_real_mode();
init_mem_mapping();
@@ -1226,6 +1234,14 @@ void __init setup_arch(char **cmdline_p)
x86_init.timers.wallclock_init();
+ /*
+ * This needs to run before setup_local_APIC() which soft-disables the
+ * local APIC temporarily and that masks the thermal LVT interrupt,
+ * leading to softlockups on machines which have configured SMI
+ * interrupt delivery.
+ */
+ therm_lvt_init();
+
mcheck_init();
register_refined_jiffies(CLOCK_TICK_RATE);
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-shared.c
index 0aa9f13efd57..9f90f460a28c 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -26,13 +26,13 @@ static bool __init sev_es_check_cpu_features(void)
static void __noreturn sev_es_terminate(unsigned int reason)
{
- u64 val = GHCB_SEV_TERMINATE;
+ u64 val = GHCB_MSR_TERM_REQ;
/*
* Tell the hypervisor what went wrong - only reason-set 0 is
* currently supported.
*/
- val |= GHCB_SEV_TERMINATE_REASON(0, reason);
+ val |= GHCB_SEV_TERM_REASON(0, reason);
/* Request Guest Termination from Hypervisor */
sev_es_wr_ghcb_msr(val);
@@ -47,15 +47,15 @@ static bool sev_es_negotiate_protocol(void)
u64 val;
/* Do the GHCB protocol version negotiation */
- sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
+ sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_INFO(val) != GHCB_SEV_INFO)
+ if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
return false;
- if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
- GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
+ if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
+ GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
return false;
return true;
@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
+ ghcb->save.sw_exit_code = 0;
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
@@ -153,28 +154,28 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->ax = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->bx = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->cx = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->dx = val >> 32;
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev.c
index 73873b007838..a6895e440bc3 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev.c
@@ -7,12 +7,11 @@
* Author: Joerg Roedel <jroedel@suse.de>
*/
-#define pr_fmt(fmt) "SEV-ES: " fmt
+#define pr_fmt(fmt) "SEV: " fmt
#include <linux/sched/debug.h> /* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
-#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
@@ -22,7 +21,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
@@ -192,19 +191,39 @@ void noinstr __sev_es_ist_exit(void)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
+ WARN_ON(!irqs_disabled());
+
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
- if (unlikely(data->backup_ghcb_active))
- return NULL;
+ if (unlikely(data->backup_ghcb_active)) {
+ /*
+ * Backup-GHCB is also already in use. There is no way
+ * to continue here so just kill the machine. To make
+ * panic() work, mark GHCBs inactive so that messages
+ * can be printed out.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ instrumentation_begin();
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ instrumentation_end();
+ }
/* Mark backup_ghcb active before writing to it */
data->backup_ghcb_active = true;
@@ -221,24 +240,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
return ghcb;
}
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
- struct sev_es_runtime_data *data;
- struct ghcb *ghcb;
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
-
- if (state->ghcb) {
- /* Restore GHCB from Backup */
- *ghcb = *state->ghcb;
- data->backup_ghcb_active = false;
- state->ghcb = NULL;
- } else {
- data->ghcb_active = false;
- }
-}
-
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -266,17 +267,24 @@ static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
char buffer[MAX_INSN_SIZE];
- int res;
+ int insn_bytes;
- res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
- if (!res) {
+ insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ if (insn_bytes == 0) {
+ /* Nothing could be copied */
ctxt->fi.vector = X86_TRAP_PF;
ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
ctxt->fi.cr2 = ctxt->regs->ip;
return ES_EXCEPTION;
+ } else if (insn_bytes == -EINVAL) {
+ /* Effective RIP could not be calculated */
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ ctxt->fi.cr2 = 0;
+ return ES_EXCEPTION;
}
- if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res))
+ if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
return ES_DECODE_FAILED;
if (ctxt->insn.immediate.got)
@@ -323,31 +331,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
- memcpy(dst, buf, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __put_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * vc_write_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whatever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
memcpy(&d1, buf, 1);
- if (put_user(d1, target))
+ if (__put_user(d1, target))
goto fault;
break;
case 2:
memcpy(&d2, buf, 2);
- if (put_user(d2, target))
+ if (__put_user(d2, target))
goto fault;
break;
case 4:
memcpy(&d4, buf, 4);
- if (put_user(d4, target))
+ if (__put_user(d4, target))
goto fault;
break;
case 8:
memcpy(&d8, buf, 8);
- if (put_user(d8, target))
+ if (__put_user(d8, target))
goto fault;
break;
default:
@@ -378,30 +399,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
- memcpy(buf, src, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __get_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * vc_read_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
- if (get_user(d1, s))
+ if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
- if (get_user(d2, s))
+ if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
- if (get_user(d4, s))
+ if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
- if (get_user(d8, s))
+ if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
@@ -459,14 +493,39 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
}
/* Include code shared with pre-decompression boot stage */
-#include "sev-es-shared.c"
+#include "sev-shared.c"
+
+static noinstr void __sev_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ /*
+ * Invalidate the GHCB so a VMGEXIT instruction issued
+ * from userspace won't appear to be valid.
+ */
+ vc_ghcb_invalidate(ghcb);
+ data->ghcb_active = false;
+ }
+}
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
@@ -476,7 +535,7 @@ void noinstr __sev_es_nmi_complete(void)
sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
VMGEXIT();
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
static u64 get_jump_table_addr(void)
@@ -488,7 +547,7 @@ static u64 get_jump_table_addr(void)
local_irq_save(flags);
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
@@ -502,7 +561,7 @@ static u64 get_jump_table_addr(void)
ghcb_sw_exit_info_2_is_valid(ghcb))
ret = ghcb->save.sw_exit_info_2;
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
local_irq_restore(flags);
@@ -627,7 +686,7 @@ static void sev_es_ap_hlt_loop(void)
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
while (true) {
vc_ghcb_invalidate(ghcb);
@@ -644,7 +703,7 @@ static void sev_es_ap_hlt_loop(void)
break;
}
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
/*
@@ -734,7 +793,7 @@ void __init sev_es_init_vc_handling(void)
sev_es_setup_play_dead();
/* Secondary CPUs use the runtime #VC handler */
- initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
+ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
@@ -1172,14 +1231,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
return ES_EXCEPTION;
}
-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
-{
- if (user_mode(regs))
- noist_exc_debug(regs);
- else
- exc_debug(regs);
-}
-
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
struct ghcb *ghcb,
unsigned long exit_code)
@@ -1255,6 +1306,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
case X86_TRAP_UD:
exc_invalid_op(ctxt->regs);
break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
case X86_TRAP_AC:
exc_alignment_check(ctxt->regs, error_code);
break;
@@ -1271,55 +1326,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}
-/*
- * Main #VC exception handler. It is called when the entry code was able to
- * switch off the IST to a safe kernel stack.
- *
- * With the current implementation it is always possible to switch to a safe
- * stack because #VC exceptions only happen at known places, like intercepted
- * instructions or accesses to MMIO areas/IO ports. They can also happen with
- * code instrumentation when the hypervisor intercepts #DB, but the critical
- * paths are forbidden to be instrumented, so #DB exceptions currently also
- * only happen in safe places.
- */
-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
- irqentry_state_t irq_state;
struct ghcb_state state;
struct es_em_ctxt ctxt;
enum es_result result;
struct ghcb *ghcb;
+ bool ret = true;
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
- vc_handle_trap_db(regs);
- return;
- }
-
- irq_state = irqentry_nmi_enter(regs);
- lockdep_assert_irqs_disabled();
- instrumentation_begin();
-
- /*
- * This is invoked through an interrupt gate, so IRQs are disabled. The
- * code below might walk page-tables for user or kernel addresses, so
- * keep the IRQs disabled to protect us against concurrent TLB flushes.
- */
-
- ghcb = sev_es_get_ghcb(&state);
- if (!ghcb) {
- /*
- * Mark GHCBs inactive so that panic() is able to print the
- * message.
- */
- data->ghcb_active = false;
- data->backup_ghcb_active = false;
-
- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
- }
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);
@@ -1327,7 +1342,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
if (result == ES_OK)
result = vc_handle_exitcode(&ctxt, ghcb, error_code);
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
/* Done - now check the result */
switch (result) {
@@ -1335,17 +1350,20 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
vc_finish_insn(&ctxt);
break;
case ES_UNSUPPORTED:
- pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_VMM_ERROR:
pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_DECODE_FAILED:
pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_EXCEPTION:
vc_forward_exception(&ctxt);
break;
@@ -1361,24 +1379,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
BUG();
}
-out:
- instrumentation_end();
- irqentry_nmi_exit(regs, irq_state);
+ return ret;
+}
- return;
+static __always_inline bool vc_is_db(unsigned long error_code)
+{
+ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
+}
-fail:
- if (user_mode(regs)) {
- /*
- * Do not kill the machine if user-space triggered the
- * exception. Send SIGBUS instead and let user-space deal with
- * it.
- */
- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
- } else {
- pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
- result);
+/*
+ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
+ * and will panic when an error happens.
+ */
+DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
+{
+ irqentry_state_t irq_state;
+
+ /*
+ * With the current implementation it is always possible to switch to a
+ * safe stack because #VC exceptions only happen at known places, like
+ * intercepted instructions or accesses to MMIO areas/IO ports. They can
+ * also happen with code instrumentation when the hypervisor intercepts
+ * #DB, but the critical paths are forbidden to be instrumented, so #DB
+ * exceptions currently also only happen in safe places.
+ *
+ * But keep this here in case the noinstr annotations are violated due
+ * to a bug elsewhere.
+ */
+ if (unlikely(on_vc_fallback_stack(regs))) {
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+ }
+
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ exc_debug(regs);
+ return;
+ }
+ irq_state = irqentry_nmi_enter(regs);
+
+ instrumentation_begin();
+
+ if (!vc_raw_handle_exception(regs, error_code)) {
/* Show some debug info */
show_regs(regs);
@@ -1389,23 +1435,38 @@ fail:
panic("Returned from Terminate-Request to Hypervisor\n");
}
- goto out;
+ instrumentation_end();
+ irqentry_nmi_exit(regs, irq_state);
}
-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
+/*
+ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
+ * and will kill the current task with SIGBUS when an error happens.
+ */
+DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ noist_exc_debug(regs);
+ return;
+ }
+
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
- panic("Can't handle #VC exception from unsupported context\n");
- instrumentation_end();
-}
-DEFINE_IDTENTRY_VC(exc_vmm_communication)
-{
- if (likely(!on_vc_fallback_stack(regs)))
- safe_stack_exc_vmm_communication(regs, error_code);
- else
- ist_exc_vmm_communication(regs, error_code);
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ }
+
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
}
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 0e5d0a7e203b..06743ec054d2 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
+
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
@@ -138,8 +141,10 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
- BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
- BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0ad5214f598a..7770245cc7fa 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2043,7 +2043,7 @@ static bool amd_set_max_freq_ratio(void)
return false;
}
- highest_perf = perf_caps.highest_perf;
+ highest_perf = amd_get_highest_perf();
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index 8daa70b0d2da..576b47e7523d 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -346,14 +346,12 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (!regs)
return false;
- nr_copied = insn_fetch_from_user(regs, buf);
-
/*
- * The insn_fetch_from_user above could have failed if user code
- * is protected by a memory protection key. Give up on emulation
- * in such a case. Should we issue a page fault?
+ * Give up on emulation if fetching the instruction failed. Should a
+ * page fault or a #GP be issued?
*/
- if (!nr_copied)
+ nr_copied = insn_fetch_from_user(regs, buf);
+ if (nr_copied <= 0)
return false;
if (!insn_decode_from_regs(&insn, regs, buf, nr_copied))
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9a48f138832d..b4da665bb892 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -655,6 +655,7 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
entry->ecx = F(RDPID);
++array->nent;
+ break;
default:
break;
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8a0ccdb56076..5e5de05a8fbf 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5111,7 +5111,7 @@ done:
return rc;
}
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
@@ -5322,7 +5322,8 @@ done_prefixes:
ctxt->execute = opcode.u.execute;
- if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+ if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
+ likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index f98370a39936..f00830e5202f 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1172,6 +1172,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
{
struct kvm_hv *hv = to_kvm_hv(kvm);
u64 gfn;
+ int idx;
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
@@ -1190,9 +1191,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
hv->tsc_ref.tsc_sequence = 0;
+
+ /*
+ * Take the srcu lock as memslots will be accessed to check the gfn
+ * cache generation against the memslots generation.
+ */
+ idx = srcu_read_lock(&kvm->srcu);
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+ srcu_read_unlock(&kvm->srcu, idx);
out_unlock:
mutex_unlock(&hv->hv_lock);
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index f016838faedd..3e870bf9ca4d 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -314,7 +314,6 @@ struct x86_emulate_ctxt {
int interruptibility;
bool perm_ok; /* do not check permissions if true */
- bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
@@ -491,7 +490,7 @@ enum x86_intercept {
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c0ebef560bd1..17fa4ab1b834 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
if (!apic_x2apic_mode(apic))
valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ if (alignment + len > 4)
+ return 1;
+
if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
return 1;
@@ -1494,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
static void cancel_hv_timer(struct kvm_lapic *apic);
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ preempt_disable();
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_timer(apic);
+ preempt_enable();
+}
+
static void apic_update_lvtt(struct kvm_lapic *apic)
{
u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
if (apic->lapic_timer.timer_mode != timer_mode) {
if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
APIC_LVT_TIMER_TSCDEADLINE)) {
- hrtimer_cancel(&apic->lapic_timer.timer);
- preempt_disable();
- if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_timer(apic);
- preempt_enable();
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, 0);
apic->lapic_timer.period = 0;
apic->lapic_timer.tscdeadline = 0;
@@ -1598,11 +1606,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
+ if (lapic_timer_advance_dynamic) {
+ adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
+ /*
+ * If the timer fired early, reread the TSC to account for the
+ * overhead of the above adjustment to avoid waiting longer
+ * than is necessary.
+ */
+ if (guest_tsc < tsc_deadline)
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ }
+
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
-
- if (lapic_timer_advance_dynamic)
- adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
}
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
@@ -1661,7 +1677,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
}
atomic_inc(&apic->lapic_timer.pending);
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
if (from_timer_fn)
kvm_vcpu_kick(vcpu);
}
@@ -2084,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
if (apic_lvtt_tscdeadline(apic))
break;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
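
The reordering in __kvm_wait_lapic_expire() runs the dynamic advance adjustment first and then re-samples the TSC, so the busy-wait that follows does not also cover the time the adjustment itself took. A minimal user-space sketch of the same idea, using CLOCK_MONOTONIC instead of the TSC (all values here are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t deadline = now_ns() + 100000;	/* hypothetical 100 us deadline */
	uint64_t now = now_ns();

	/* Bookkeeping that itself consumes time would run here. */

	/* Re-sample the clock after the bookkeeping so the wait below does
	 * not also sleep through the bookkeeping overhead.
	 */
	if (now < deadline)
		now = now_ns();

	while (now < deadline)		/* busy-wait until the deadline */
		now = now_ns();

	printf("deadline reached\n");
	return 0;
}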
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..8d5876dfc6b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4739,9 +4739,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
context->inject_page_fault = kvm_inject_page_fault;
}
+static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+{
+ union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+ /*
+ * Nested MMUs are used only for walking L2's gva->gpa, they never have
+ * shadow pages of their own and so "direct" has no meaning. Set it
+ * to "true" to try to detect bogus usage of the nested MMU.
+ */
+ role.base.direct = true;
+
+ if (!is_paging(vcpu))
+ role.base.level = 0;
+ else if (is_long_mode(vcpu))
+ role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
+ PT64_ROOT_4LEVEL;
+ else if (is_pae(vcpu))
+ role.base.level = PT32E_ROOT_LEVEL;
+ else
+ role.base.level = PT32_ROOT_LEVEL;
+
+ return role;
+}
+
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 70b7e44e3035..823a5919f9fa 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -90,8 +90,8 @@ struct guest_walker {
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
bool pte_writable[PT_MAX_FULL_LEVELS];
- unsigned pt_access;
- unsigned pte_access;
+ unsigned int pt_access[PT_MAX_FULL_LEVELS];
+ unsigned int pte_access;
gfn_t gfn;
struct x86_exception fault;
};
@@ -418,13 +418,15 @@ retry_walk:
}
walker->ptes[walker->level - 1] = pte;
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
} while (!is_last_gpte(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
- walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
@@ -463,7 +465,8 @@ retry_walk:
}
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, walker->pte_access, walker->pt_access);
+ __func__, (u64)pte, walker->pte_access,
+ walker->pt_access[walker->level - 1]);
return 1;
error:
@@ -643,7 +646,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
- unsigned direct_access, access = gw->pt_access;
+ unsigned int direct_access, access;
int top_level, level, req_level, ret;
gfn_t base_gfn = gw->gfn;
@@ -675,6 +678,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
+ access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access);
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 95eeb5ac6a8a..237317b1eddd 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1192,9 +1192,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
}
/*
- * Remove write access from all the SPTEs mapping GFNs [start, end). If
- * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
+ * Remove write access from all SPTEs at or above min_level that map GFNs
+ * [start, end). Returns true if an SPTE has been changed and the TLBs need to
+ * be flushed.
*/
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, int min_level)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 712b4e0de481..5e7e920113f3 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -28,10 +28,8 @@
#include "svm.h"
/* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+bool avic;
+module_param(avic, bool, S_IRUGO);
#define SVM_AVIC_DOORBELL 0xc001011b
@@ -223,7 +221,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
return &avic_physical_id_table[index];
}
-/**
+/*
* Note:
* AVIC hardware walks the nested page table to check permissions,
* but does not use the SPA address specified in the leaf page
@@ -766,7 +764,7 @@ out:
return ret;
}
-/**
+/*
* Note:
* The HW cannot support posting multicast/broadcast
* interrupts to a vCPU. So, we still use legacy interrupt
@@ -1007,7 +1005,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
-/**
+/*
* This function is called during VCPU halt/unhalt.
*/
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5bc887e9a986..8d36f0c73071 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
sev->misc_cg = NULL;
}
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
{
struct sev_data_decommission decommission;
+
+ if (!handle)
+ return;
+
+ decommission.handle = handle;
+ sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
struct sev_data_deactivate deactivate;
if (!handle)
@@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
sev_guest_deactivate(&deactivate, NULL);
up_read(&sev_deactivate_lock);
- /* decommission handle */
- decommission.handle = handle;
- sev_guest_decommission(&decommission, NULL);
+ sev_decommission(handle);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error);
- if (ret)
+ if (ret) {
+ sev_decommission(start.handle);
goto e_free_session;
+ }
/* return handle to userspace */
params.handle = start.handle;
@@ -1103,10 +1113,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_start data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
- if (ret < 0)
- return ret;
params->session_len = data.session_len;
if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1215,10 +1224,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct sev_data_send_update_data data;
int ret;
+ memset(&data, 0, sizeof(data));
data.handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
- if (ret < 0)
- return ret;
params->hdr_len = data.hdr_len;
params->trans_len = data.trans_len;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dfa351e605de..e088086f3de6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -863,8 +863,8 @@ static __init void svm_adjust_mmio_mask(void)
return;
/* If memory encryption is not enabled, use existing mask */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
@@ -1010,9 +1010,7 @@ static __init int svm_hardware_setup(void)
}
if (avic) {
- if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_AVIC) ||
- !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
+ if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) {
avic = false;
} else {
pr_info("AVIC enabled\n");
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e44567ceb865..2908c6ab5bb4 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -20,6 +20,7 @@
#include <linux/bits.h>
#include <asm/svm.h>
+#include <asm/sev-common.h>
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
@@ -479,7 +480,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-extern int avic;
+extern bool avic;
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
@@ -525,40 +526,9 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
/* sev.c */
-#define GHCB_VERSION_MAX 1ULL
-#define GHCB_VERSION_MIN 1ULL
-
-#define GHCB_MSR_INFO_POS 0
-#define GHCB_MSR_INFO_MASK (BIT_ULL(12) - 1)
-
-#define GHCB_MSR_SEV_INFO_RESP 0x001
-#define GHCB_MSR_SEV_INFO_REQ 0x002
-#define GHCB_MSR_VER_MAX_POS 48
-#define GHCB_MSR_VER_MAX_MASK 0xffff
-#define GHCB_MSR_VER_MIN_POS 32
-#define GHCB_MSR_VER_MIN_MASK 0xffff
-#define GHCB_MSR_CBIT_POS 24
-#define GHCB_MSR_CBIT_MASK 0xff
-#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
- ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
- (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
- (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
- GHCB_MSR_SEV_INFO_RESP)
-
-#define GHCB_MSR_CPUID_REQ 0x004
-#define GHCB_MSR_CPUID_RESP 0x005
-#define GHCB_MSR_CPUID_FUNC_POS 32
-#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
-#define GHCB_MSR_CPUID_VALUE_POS 32
-#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
-#define GHCB_MSR_CPUID_REG_POS 30
-#define GHCB_MSR_CPUID_REG_MASK 0x3
-
-#define GHCB_MSR_TERM_REQ 0x100
-#define GHCB_MSR_TERM_REASON_SET_POS 12
-#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
-#define GHCB_MSR_TERM_REASON_POS 16
-#define GHCB_MSR_TERM_REASON_MASK 0xff
+#define GHCB_VERSION_MAX 1ULL
+#define GHCB_VERSION_MIN 1ULL
+
extern unsigned int max_sev_asid;
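
The GHCB MSR protocol constants deleted above now come from <asm/sev-common.h>; only their location changes. As a stand-alone illustration, the SEV_INFO response packs the maximum/minimum protocol version and the C-bit position with the same shifts and masks the removed macros used (the C-bit value below is only an example):

#include <stdint.h>
#include <stdio.h>

/* Field positions/masks as in the removed GHCB_MSR_* definitions. */
#define SEV_INFO_RESP	0x001ULL
#define VER_MAX_POS	48
#define VER_MIN_POS	32
#define CBIT_POS	24

static uint64_t ghcb_msr_sev_info(uint64_t max, uint64_t min, uint64_t cbit)
{
	return ((max & 0xffff) << VER_MAX_POS) |
	       ((min & 0xffff) << VER_MIN_POS) |
	       ((cbit & 0xff) << CBIT_POS) |
	       SEV_INFO_RESP;
}

int main(void)
{
	/* GHCB protocol versions 1..1, C-bit in position 51 (example value). */
	printf("SEV_INFO response: 0x%llx\n",
	       (unsigned long long)ghcb_msr_sev_info(1, 1, 51));
	return 0;
}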
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a61c015870e3..4f839148948b 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
TP_ARGS(msg, err),
TP_STRUCT__entry(
- __field(const char *, msg)
+ __string(msg, msg)
__field(u32, err)
),
TP_fast_assign(
- __entry->msg = msg;
+ __assign_str(msg, msg);
__entry->err = err;
),
- TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+ TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 8dee8a5fbc17..aa0e7872fcc9 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -90,8 +90,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
static inline bool cpu_has_vmx_posted_intr(void)
{
- return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
- vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_load_ia32_efer(void)
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 459748680daf..5f81ef092bd4 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -238,6 +238,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
/*
+ * Bail out of the block loop if the VM has an assigned
+ * device, but the blocking vCPU didn't reconfigure the
+ * PI.NV to the wakeup vector, i.e. the assigned device
+ * came along after the initial check in pi_pre_block().
+ */
+void vmx_pi_start_assignment(struct kvm *kvm)
+{
+ if (!irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
+}
+
+/*
* pi_update_irte - set IRTE for Posted-Interrupts
*
* @kvm: kvm
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 0bdc41391c5b..7f7b2326caf5 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
bool set);
+void vmx_pi_start_assignment(struct kvm *kvm);
#endif /* __KVM_X86_VMX_POSTED_INTR_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4bceb5ca3a89..c2a779b688e6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4843,7 +4843,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
- unsigned long cr2, rip, dr6;
+ unsigned long cr2, dr6;
u32 vect_info;
vect_info = vmx->idt_vectoring_info;
@@ -4933,8 +4933,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
- rip = kvm_rip_read(vcpu);
- kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
kvm_run->debug.arch.exception = ex_no;
break;
case AC_VECTOR:
@@ -6248,6 +6247,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
switch (kvm_get_apic_mode(vcpu)) {
case LAPIC_MODE_INVALID:
WARN_ONCE(true, "Invalid local APIC state");
+ break;
case LAPIC_MODE_DISABLED:
break;
case LAPIC_MODE_XAPIC:
@@ -7721,6 +7721,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.nested_ops = &vmx_nested_ops,
.update_pi_irte = pi_update_irte,
+ .start_assignment = vmx_pi_start_assignment,
#ifdef CONFIG_X86_64
.set_hv_timer = vmx_set_hv_timer,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9b6bca616929..e0f4a46649d7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3072,6 +3072,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
+
+ if (!tdp_enabled) {
+ /*
+ * A TLB flush on behalf of the guest is equivalent to
+ * INVPCID(all), toggling CR4.PGE, etc., which requires
+ * a forced sync of the shadow page tables. Unload the
+ * entire MMU here and the subsequent load will sync the
+ * shadow page tables, and also flush the TLB.
+ */
+ kvm_mmu_unload(vcpu);
+ return;
+ }
+
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
@@ -3101,10 +3114,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
* expensive IPIs.
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+ u8 st_preempted = xchg(&st->preempted, 0);
+
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
- st->preempted & KVM_VCPU_FLUSH_TLB);
- if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ st_preempted & KVM_VCPU_FLUSH_TLB);
+ if (st_preempted & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
+ } else {
+ st->preempted = 0;
}
vcpu->arch.st.preempted = 0;
@@ -3468,7 +3485,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
- case MSR_K8_SYSCFG:
+ case MSR_AMD64_SYSCFG:
case MSR_K8_TSEG_ADDR:
case MSR_K8_TSEG_MASK:
case MSR_VM_HSAVE_PA:
@@ -7089,7 +7106,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ vcpu->arch.hflags = emul_flags;
+ kvm_mmu_reset_context(vcpu);
}
static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -7226,6 +7246,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->exception.vector = -1;
+ ctxt->perm_ok = false;
+
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
@@ -7561,14 +7586,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
- ctxt->interruptibility = 0;
- ctxt->have_exception = false;
- ctxt->exception.vector = -1;
- ctxt->perm_ok = false;
-
- ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
- r = x86_decode_insn(ctxt, insn, insn_len);
+ r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@@ -8243,6 +8261,7 @@ void kvm_arch_exit(void)
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
free_percpu(user_return_msrs);
+ kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
#ifdef CONFIG_KVM_XEN
static_key_deferred_flush(&kvm_xen_enabled);
@@ -8360,6 +8379,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
vcpu->stat.directed_yield_attempted++;
+ if (single_task_running())
+ goto no_yield;
+
rcu_read_lock();
map = rcu_dereference(vcpu->kvm->arch.apic_map);
@@ -9496,7 +9518,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0)
break;
- kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
@@ -10115,8 +10137,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
+ vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
/*
* Trigger an rflags update that will inject or remove the trace
@@ -11499,7 +11520,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
void kvm_arch_start_assignment(struct kvm *kvm)
{
- atomic_inc(&kvm->arch.assigned_device_count);
+ if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
+ static_call_cond(kvm_x86_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index a67afd74232c..a1d24fdc07cf 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -1417,7 +1417,7 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
}
}
-static unsigned long insn_get_effective_ip(struct pt_regs *regs)
+static int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip)
{
unsigned long seg_base = 0;
@@ -1430,10 +1430,12 @@ static unsigned long insn_get_effective_ip(struct pt_regs *regs)
if (!user_64bit_mode(regs)) {
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
if (seg_base == -1L)
- return 0;
+ return -EINVAL;
}
- return seg_base + regs->ip;
+ *ip = seg_base + regs->ip;
+
+ return 0;
}
/**
@@ -1446,18 +1448,17 @@ static unsigned long insn_get_effective_ip(struct pt_regs *regs)
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
*/
int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
- ip = insn_get_effective_ip(regs);
- if (!ip)
- return 0;
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
@@ -1475,18 +1476,17 @@ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
*/
int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
- ip = insn_get_effective_ip(regs);
- if (!ip)
- return 0;
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
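
insn_get_effective_ip() now reports failure through its return value and hands the address back via a pointer, instead of overloading 0 as an in-band error, which is why callers such as fixup_umip_exception() can simply test for `<= 0`. A self-contained sketch of that convention (the helper name and values are hypothetical):

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper: computes a value, reports failure via return code,
 * and returns the result through an out-parameter instead of a sentinel.
 */
static int get_effective_value(int base, int offset, long *out)
{
	if (base < 0)		/* stand-in for "segment base lookup failed" */
		return -EINVAL;

	*out = (long)base + offset;
	return 0;
}

int main(void)
{
	long val;

	if (get_effective_value(-1, 10, &val))
		printf("lookup failed, no sentinel value needed\n");

	if (!get_effective_value(100, 10, &val))
		printf("value = %ld\n", val);
	return 0;
}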
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 4d32cb06ffd5..ec9922cba30a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -58,12 +58,16 @@ SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_call_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
ANNOTATE_RETPOLINE_SAFE
1: jmp *%\reg
2: .skip 5-(2b-1b), 0x90
SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
.endm
/*
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index b93d6cd08a7f..121921b2927c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -5,7 +5,7 @@
#include <xen/xen.h>
#include <asm/fpu/internal.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 362255bfc9a8..2d27932c9ac7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
-
- force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ else
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
local_irq_disable();
}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 12c686c65ea9..60ade7dd71bd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
if (!IS_ENABLED(CONFIG_EFI))
return;
- if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+ if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+ (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+ efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
desc->flags |= IORES_MAP_ENCRYPTED;
}
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 04aba7e80a36..470b20208430 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
- /* Check the SEV MSR whether SEV or SME is enabled */
- sev_status = __rdmsr(MSR_AMD64_SEV);
- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
-
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
@@ -519,17 +515,22 @@ void __init sme_enable(struct boot_params *bp)
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
- if (!(eax & feature_mask))
+ /* Check whether SEV or SME is supported */
+ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
return;
me_mask = 1UL << (ebx & 0x3f);
+ /* Check the SEV MSR whether SEV or SME is enabled */
+ sev_status = __rdmsr(MSR_AMD64_SEV);
+ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
/* Check if memory encryption is enabled */
if (feature_mask == AMD_SME_BIT) {
/*
* No SME if Hypervisor bit is set. This check is here to
* prevent a guest from trying to enable SME. For running as a
- * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
+ * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
* might be other hypervisors which emulate that MSR as non-zero
* or even pass it through to the guest.
* A malicious hypervisor can still trick a guest into this
@@ -542,8 +543,8 @@ void __init sme_enable(struct boot_params *bp)
return;
/* For SME, check the SYSCFG MSR */
- msr = __rdmsr(MSR_K8_SYSCFG);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ msr = __rdmsr(MSR_AMD64_SYSCFG);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
} else {
/* SEV state cannot be controlled by a command line option */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 5eb4dc2b97da..e94da744386f 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
/* make sure all non-reserved blocks are inside the limits */
bi->start = max(bi->start, low);
- bi->end = min(bi->end, high);
+
+ /* preserve info for non-RAM areas above 'max_pfn': */
+ if (bi->end > high) {
+ numa_add_memblk_to(bi->nid, high, bi->end,
+ &numa_reserved_meminfo);
+ bi->end = high;
+ }
/* and there's no empty block */
if (bi->start >= bi->end)
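
Instead of silently truncating a block at the limit, the numa_cleanup_meminfo() change above records the portion above 'high' in a separate reserved list before trimming. A small stand-alone sketch of that split (addresses are made up):

#include <stdint.h>
#include <stdio.h>

struct blk { uint64_t start, end; };

/* If the block extends past 'limit', record the tail separately and trim. */
static void split_at_limit(struct blk *b, uint64_t limit, struct blk *tail)
{
	tail->start = tail->end = 0;

	if (b->end > limit) {
		tail->start = limit;
		tail->end = b->end;
		b->end = limit;
	}
}

int main(void)
{
	struct blk b = { 0x100000000ULL, 0x180000000ULL };	/* 4G..6G */
	struct blk tail;

	split_at_limit(&b, 0x140000000ULL, &tail);		/* limit at 5G */
	printf("kept [%#llx, %#llx), reserved [%#llx, %#llx)\n",
	       (unsigned long long)b.start, (unsigned long long)b.end,
	       (unsigned long long)tail.start, (unsigned long long)tail.end);
	return 0;
}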
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index ae744b6a0785..dd40d3fea74e 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -284,7 +284,7 @@ static int __init early_root_info_init(void)
/* need to take out [4G, TOM2) for RAM*/
/* SYS_CFG */
- address = MSR_K8_SYSCFG;
+ address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is enabled? */
if (val & (1<<21)) {
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 02dc64625e64..2edd86649468 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+#define RS690_LOWER_TOP_OF_DRAM2 0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2 0x31
+#define RS690_HTIU_NB_INDEX 0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
+#define RS690_HTIU_NB_DATA 0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+ u32 val = 0;
+ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+ if (top_of_dram <= (1ULL << 32))
+ return;
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2);
+ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+ if (val)
+ return;
+
+ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
#endif
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 8a26e705cb06..147c30a81f15 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -468,7 +468,7 @@ void __init efi_init(void)
*/
if (!efi_runtime_supported())
- pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+ pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");
if (!efi_runtime_supported() || efi_runtime_disabled()) {
efi_memmap_unmap();
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index df7b5477fc4f..7515e78ef898 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -47,7 +47,7 @@
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
/*
* We allocate runtime services regions top-down, starting from -4G, i.e.
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 7850111008a8..b15ebfe40a73 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
size -= rm_size;
}
+ /*
+ * Don't free memory under 1M for two reasons:
+ * - BIOS might clobber it
+ * - Crash kernel needs it to be reserved
+ */
+ if (start + size < SZ_1M)
+ continue;
+ if (start < SZ_1M) {
+ size -= (SZ_1M - start);
+ start = SZ_1M;
+ }
+
memblock_free_late(start, size);
}
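
The quirk above skips or trims any boot-services range that overlaps the first megabyte before passing it to memblock_free_late(). The same clamping, as a runnable stand-alone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_1M (1024UL * 1024UL)

/* Trim [start, start + size) so it never frees memory below 1 MiB.
 * Returns false if nothing above 1 MiB remains to free.
 */
static bool clamp_above_1m(uint64_t *start, uint64_t *size)
{
	if (*start + *size < SZ_1M)
		return false;
	if (*start < SZ_1M) {
		*size -= SZ_1M - *start;
		*start = SZ_1M;
	}
	return true;
}

int main(void)
{
	uint64_t start = 0x80000, size = 0x200000;	/* 512 KiB .. 2.5 MiB */

	if (clamp_above_1m(&start, &size))
		printf("free [%#llx, %#llx)\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size));
	return 0;
}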
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 6b1f3a4eeb44..a0b491ae2de8 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -10,7 +10,6 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 1be71ef5e4c4..6534c92d0f83 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -9,7 +9,7 @@
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem) {
+ if (!mem)
pr_info("No sub-1M memory is available for the trampoline\n");
- return;
- }
+ else
+ set_real_mode_mem(mem);
- memblock_reserve(mem, size);
- set_real_mode_mem(mem);
- crash_reserve_low_1M();
+ /*
+	 * Unconditionally reserve the entire first 1M, see comment in
+ * setup_arch().
+ */
+ memblock_reserve(0, SZ_1M);
}
static void sme_sev_setup_real_mode(struct trampoline_header *th)
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 84c5d1b33d10..cc8391f86cdb 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -123,9 +123,9 @@ SYM_CODE_START(startup_32)
*/
btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
jnc .Ldone
- movl $MSR_K8_SYSCFG, %ecx
+ movl $MSR_AMD64_SYSCFG, %ecx
rdmsr
- bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+ bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
jc .Ldone
/*
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 17503fed2017..03149422dce2 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
+ instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
+ instrumentation_end();
}
#ifdef CONFIG_X86_MCE
@@ -1273,16 +1275,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
+ /* Work out if we support NX */
+ get_cpu_cap(&boot_cpu_data);
+ x86_configure_nx();
+
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
- /* Work out if we support NX */
- get_cpu_cap(&boot_cpu_data);
- x86_configure_nx();
-
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 744c2f463845..4361fe4247e3 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -43,7 +43,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -52,11 +52,11 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_EXCLUSIVE
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -74,7 +74,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -95,7 +95,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -116,7 +116,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -135,7 +135,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -157,7 +157,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -180,7 +180,7 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
#else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -198,7 +198,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -218,7 +218,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned int tmp, vval; \
\
@@ -257,7 +257,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* _XTENSA_ATOMIC_H */
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index a175f8aec3fb..3699e2818efb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -80,7 +80,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
}
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
@@ -97,7 +97,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
return __cmpxchg_u32(ptr, old, new);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
@@ -107,11 +107,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32
@@ -169,7 +169,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#endif
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 9d76d433d3d6..fd2f30227d96 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -413,7 +413,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0270cd7ca165..acd1f881273e 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -372,9 +372,38 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
return bic->bfqq[is_sync];
}
+static void bfq_put_stable_ref(struct bfq_queue *bfqq);
+
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
+ /*
+ * If bfqq != NULL, then a non-stable queue merge between
+ * bic->bfqq and bfqq is happening here. This causes troubles
+ * in the following case: bic->bfqq has also been scheduled
+ * for a possible stable merge with bic->stable_merge_bfqq,
+ * and bic->stable_merge_bfqq == bfqq happens to
+ * hold. Troubles occur because bfqq may then undergo a split,
+ * thereby becoming eligible for a stable merge. Yet, if
+ * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
+ * would be stably merged with itself. To avoid this anomaly,
+ * we cancel the stable merge if
+ * bic->stable_merge_bfqq == bfqq.
+ */
bic->bfqq[is_sync] = bfqq;
+
+ if (bfqq && bic->stable_merge_bfqq == bfqq) {
+ /*
+ * Actually, these same instructions are executed also
+ * in bfq_setup_cooperator, in case of abort or actual
+ * execution of a stable merge. We could avoid
+ * repeating these instructions there too, but if we
+ * did so, we would nest even more complexity in this
+ * function.
+ */
+ bfq_put_stable_ref(bic->stable_merge_bfqq);
+
+ bic->stable_merge_bfqq = NULL;
+ }
}
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
@@ -2263,10 +2292,9 @@ static void bfq_remove_request(struct request_queue *q,
}
-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
- struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *free = NULL;
/*
@@ -2631,8 +2659,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
-static void bfq_put_stable_ref(struct bfq_queue *bfqq);
-
/*
* Attempt to schedule a merge of bfqq with the currently in-service
* queue or with a close queue among the scheduled queues. Return
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index e0c4baa01857..c2d6bc88d3f1 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1069,7 +1069,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
lockdep_assert_held(&ioc->lock);
- inuse = clamp_t(u32, inuse, 1, active);
+ /*
+ * For an active leaf node, its inuse shouldn't be zero or exceed
+ * @active. An active internal node's inuse is solely determined by the
+ * inuse to active ratio of its children regardless of @inuse.
+ */
+ if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
+ inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
+ iocg->child_active_sum);
+ } else {
+ inuse = clamp_t(u32, inuse, 1, active);
+ }
iocg->last_inuse = iocg->inuse;
if (save)
@@ -1086,7 +1096,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
/* update the level sums */
parent->child_active_sum += (s32)(active - child->active);
parent->child_inuse_sum += (s32)(inuse - child->inuse);
- /* apply the udpates */
+ /* apply the updates */
child->active = active;
child->inuse = inuse;
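
For an active internal node the weight propagation now derives inuse from the children's inuse/active ratio with a round-up division, so a node whose children are barely in use still ends up with a nonzero inuse. A stand-alone sketch of that computation with made-up weights:

#include <stdint.h>
#include <stdio.h>

/* Round-up division, as DIV64_U64_ROUND_UP() does in the kernel. */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t active = 1000;		/* this node's active weight */
	uint64_t child_inuse_sum = 1;	/* children barely in use */
	uint64_t child_active_sum = 300;

	/* inuse = active * child_inuse_sum / child_active_sum, rounded up */
	uint64_t inuse = div_round_up(active * child_inuse_sum,
				      child_active_sum);

	printf("inuse = %llu\n", (unsigned long long)inuse);	/* 4, never 0 */
	return 0;
}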
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 42a365b1b9c0..996a4b2f73aa 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -358,14 +358,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct elevator_queue *e = q->elevator;
- struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ struct blk_mq_ctx *ctx;
+ struct blk_mq_hw_ctx *hctx;
bool ret = false;
enum hctx_type type;
if (e && e->type->ops.bio_merge)
- return e->type->ops.bio_merge(hctx, bio, nr_segs);
+ return e->type->ops.bio_merge(q, bio, nr_segs);
+ ctx = blk_mq_get_ctx(q);
+ hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
type = hctx->type;
if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
list_empty_careful(&ctx->rq_lists[type]))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 466676bc2f0b..c86c01bfecdb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2232,8 +2232,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
/* Bypass scheduler for flush requests */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
- } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
- !blk_queue_nonrot(q))) {
+ } else if (plug && (q->nr_hw_queues == 1 ||
+ blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
+ q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
/*
* Use plugging if we have a ->commit_rqs() hook as well, as
* we know the driver uses bd->last in a smart fashion.
@@ -3285,10 +3286,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
+ struct blk_mq_tag_set *set = q->tag_set;
- blk_mq_del_queue_tag_set(q);
+ /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+ /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
+ blk_mq_del_queue_tag_set(q);
}
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
diff --git a/block/genhd.c b/block/genhd.c
index 39ca97b0edc6..9f8cb7beaad1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -29,8 +29,6 @@
static struct kobject *block_depr;
-DECLARE_RWSEM(bdev_lookup_sem);
-
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);
@@ -609,13 +607,8 @@ void del_gendisk(struct gendisk *disk)
blk_integrity_del(disk);
disk_del_events(disk);
- /*
- * Block lookups of the disk until all bdevs are unhashed and the
- * disk is marked as dead (GENHD_FL_UP cleared).
- */
- down_write(&bdev_lookup_sem);
-
mutex_lock(&disk->part0->bd_mutex);
+ disk->flags &= ~GENHD_FL_UP;
blk_drop_partitions(disk);
mutex_unlock(&disk->part0->bd_mutex);
@@ -629,8 +622,6 @@ void del_gendisk(struct gendisk *disk)
remove_inode_hash(disk->part0->bd_inode);
set_capacity(disk, 0);
- disk->flags &= ~GENHD_FL_UP;
- up_write(&bdev_lookup_sem);
if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 8969e122f081..81e3279ecd57 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -561,11 +561,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
}
}
-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
+ struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
struct kyber_hctx_data *khd = hctx->sched_data;
- struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
struct list_head *rq_list = &kcq->rq_list[sched_domain];
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 04aded71ead2..8eea2cbf2bf4 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
return ELEVATOR_NO_MERGE;
}
-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
- struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct request *free = NULL;
bool ret;
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index b64bfdd4326c..e2716792ecc1 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
}
/**
- * efi_partition(struct parsed_partitions *state)
+ * efi_partition - scan for GPT partitions
* @state: disk parsed partitions
*
* Description: called from check.c, if the disk contains GPT
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 6cd7f7025df4..d8a91521144e 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
- src_offs++;
+ if (src_offs)
+ src_offs++;
}
/* wait for any prerequisite operations */
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 47980c6b1945..8bad63417a50 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -33,8 +33,6 @@ source "drivers/nvme/Kconfig"
source "drivers/misc/Kconfig"
-source "drivers/ide/Kconfig"
-
source "drivers/scsi/Kconfig"
source "drivers/ata/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 5a6d613e868d..f85185f9139e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -78,7 +78,6 @@ obj-$(CONFIG_CXL_BUS) += cxl/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
-obj-$(CONFIG_IDE) += ide/
obj-y += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 0ec5b3f69112..6e02448d15d9 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
{ "AMDI0020", APD_ADDR(cz_uart_desc) },
+ { "AMDI0022", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
{ "AMD0040", APD_ADDR(fch_misc_desc)},
{ "HYGO0010", APD_ADDR(wt_i2c_desc) },
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 624a26794d55..e5ba9795ec69 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
}
break;
+ case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+ "***** Address handler %p\n", object));
+
+ acpi_os_delete_mutex(object->address_space.context_mutex);
+ break;
+
default:
break;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index be7da23fad76..a4bd673934c0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
- capbuf_ret = context.ret.pointer;
- if (context.ret.length <= OSC_SUPPORT_DWORD) {
- kfree(context.ret.pointer);
- return;
- }
+ kfree(context.ret.pointer);
- /*
- * Now run _OSC again with query flag clear and with the caps
- * supported by both the OS and the platform.
- */
+ /* Now run _OSC again with query flag clear */
capbuf[OSC_QUERY_DWORD] = 0;
- capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
- kfree(context.ret.pointer);
if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
return;
capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD) {
- osc_sb_apei_support_acked =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
- osc_pc_lpi_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
- osc_sb_native_usb4_support_confirmed =
- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
- }
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ osc_sb_native_usb4_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
kfree(context.ret.pointer);
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 16c0fe8a72a7..d260bc1f3e6e 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1313,6 +1313,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
{"PNP0C0B", }, /* Generic ACPI fan */
{"INT3404", }, /* Fan */
{"INTC1044", }, /* Fan for Tiger Lake generation */
+ {"INTC1048", }, /* Fan for Alder Lake generation */
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index b852cff80287..e21611c9a170 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -134,7 +134,7 @@ int acpi_power_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
-int acpi_add_power_resource(acpi_handle handle);
+struct acpi_device *acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
@@ -142,6 +142,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
+void acpi_turn_off_unused_power_resources(bool init);
/* --------------------------------------------------------------------------
Device Power Management
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 958aaac869e8..23d9a09d7060 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -686,6 +686,13 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
return -1;
}
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
+{
+ if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID)
+ return sizeof(*spa);
+ return sizeof(*spa) - 8;
+}
+
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_system_address *spa)
@@ -693,22 +700,22 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
- if (spa->header.length != sizeof(*spa))
+ if (spa->header.length != sizeof_spa(spa))
return false;
list_for_each_entry(nfit_spa, &prev->spas, list) {
- if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+ if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) {
list_move_tail(&nfit_spa->list, &acpi_desc->spas);
return true;
}
}
- nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
+ nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa),
GFP_KERNEL);
if (!nfit_spa)
return false;
INIT_LIST_HEAD(&nfit_spa->list);
- memcpy(nfit_spa->spa, spa, sizeof(*spa));
+ memcpy(nfit_spa->spa, spa, sizeof_spa(spa));
list_add_tail(&nfit_spa->list, &acpi_desc->spas);
dev_dbg(dev, "spa index: %d type: %s\n",
spa->range_index,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 32974b575e46..97c9a94a1a30 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -52,6 +52,7 @@ struct acpi_power_resource {
u32 system_level;
u32 order;
unsigned int ref_count;
+ unsigned int users;
bool wakeup_enabled;
struct mutex resource_lock;
struct list_head dependents;
@@ -147,6 +148,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
for (i = start; i < package->package.count; i++) {
union acpi_object *element = &package->package.elements[i];
+ struct acpi_device *rdev;
acpi_handle rhandle;
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
@@ -163,13 +165,16 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
if (acpi_power_resource_is_dup(package, start, i))
continue;
- err = acpi_add_power_resource(rhandle);
- if (err)
+ rdev = acpi_add_power_resource(rhandle);
+ if (!rdev) {
+ err = -ENODEV;
break;
-
+ }
err = acpi_power_resources_list_add(rhandle, list);
if (err)
break;
+
+ to_power_resource(rdev)->users++;
}
if (err)
acpi_power_resources_list_free(list);
@@ -907,7 +912,7 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
mutex_unlock(&power_resource_list_lock);
}
-int acpi_add_power_resource(acpi_handle handle)
+struct acpi_device *acpi_add_power_resource(acpi_handle handle)
{
struct acpi_power_resource *resource;
struct acpi_device *device = NULL;
@@ -918,11 +923,11 @@ int acpi_add_power_resource(acpi_handle handle)
acpi_bus_get_device(handle, &device);
if (device)
- return 0;
+ return device;
resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource)
- return -ENOMEM;
+ return NULL;
device = &resource->device;
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
@@ -959,11 +964,11 @@ int acpi_add_power_resource(acpi_handle handle)
acpi_power_add_resource_to_list(resource);
acpi_device_add_finalize(device);
- return 0;
+ return device;
err:
acpi_release_power_resource(&device->dev);
- return result;
+ return NULL;
}
#ifdef CONFIG_ACPI_SLEEP
@@ -995,8 +1000,40 @@ void acpi_resume_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+#endif
+
+static void acpi_power_turn_off_if_unused(struct acpi_power_resource *resource,
+ bool init)
+{
+ if (resource->ref_count > 0)
+ return;
+
+ if (init) {
+ if (resource->users > 0)
+ return;
+ } else {
+ int result, state;
+
+ result = acpi_power_get_state(resource->device.handle, &state);
+ if (result || state == ACPI_POWER_RESOURCE_STATE_OFF)
+ return;
+ }
+
+ dev_info(&resource->device.dev, "Turning OFF\n");
+ __acpi_power_off(resource);
+}
-void acpi_turn_off_unused_power_resources(void)
+/**
+ * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
+ * @init: Control switch.
+ *
+ * If @init is set, unconditionally turn off all of the ACPI power resources
+ * without any users.
+ *
+ * Otherwise, turn off all ACPI power resources without active references (that
+ * is, the ones that should be "off" at the moment) that are "on".
+ */
+void acpi_turn_off_unused_power_resources(bool init)
{
struct acpi_power_resource *resource;
@@ -1005,14 +1042,10 @@ void acpi_turn_off_unused_power_resources(void)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
- if (!resource->ref_count) {
- dev_info(&resource->device.dev, "Turning OFF\n");
- __acpi_power_off(resource);
- }
+ acpi_power_turn_off_if_unused(resource, init);
mutex_unlock(&resource->resource_lock);
}
mutex_unlock(&power_resource_list_lock);
}
-#endif
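
For readability, here is the intended call pattern of the reworked helper, restating the two callers added later in this patch (see the scan.c and sleep.c hunks below); this is commentary only, not part of the change:

    /*
     * Boot:   acpi_scan_init() -> acpi_turn_off_unused_power_resources(true)
     *         turns off power resources that ended up with no users and no
     *         active references after the initial namespace scan.
     * Resume: acpi_pm_end()    -> acpi_turn_off_unused_power_resources(false)
     *         turns off power resources with no active references that still
     *         read back as "on" from the platform.
     */
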
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a22778e880c2..e10d38ac7cf2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -700,6 +700,7 @@ int acpi_device_add(struct acpi_device *device,
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}
@@ -2359,6 +2360,8 @@ int __init acpi_scan_init(void)
}
}
+ acpi_turn_off_unused_power_resources(true);
+
acpi_scan_initialized = true;
out:
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 09fd13757b65..3bb2adef8490 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -504,7 +504,7 @@ static void acpi_pm_start(u32 acpi_state)
*/
static void acpi_pm_end(void)
{
- acpi_turn_off_unused_power_resources();
+ acpi_turn_off_unused_power_resources(false);
acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a
@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
return;
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
- if (facs) {
+ if (facs)
s4_hardware_signature = facs->hardware_signature;
- acpi_put_table((struct acpi_table_header *)facs);
- }
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 1856f76ac83f..7fe41ee489d6 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -8,7 +8,6 @@ extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
extern void acpi_resume_power_resources(void);
-extern void acpi_turn_off_unused_power_resources(void);
static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
{
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 61d34e1dc59c..bcec598b89f2 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4918,7 +4918,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
uint32_t enable;
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto err;
}
binder_inner_proc_lock(proc);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 030cb32da980..b7a5abee2147 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1015,11 +1015,11 @@ config PATA_CMD640_PCI
If unsure, say N.
config PATA_FALCON
- tristate "Atari Falcon PATA support"
- depends on M68K && ATARI
+ tristate "Atari Falcon and Q40/Q60 PATA support"
+ depends on M68K && (ATARI || Q40)
help
This option enables support for the on-board IDE
- interface on the Atari Falcon.
+ interface on the Atari Falcon and Q40/Q60.
If unsure, say N.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 33192a8f687d..186cbf90c8ea 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -446,6 +446,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+ /* Dell S140/S150 */
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_SUBVENDOR_ID_DELL, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index d1f284f0c83d..2e89499bd9c3 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -384,12 +384,15 @@ extern struct device_attribute *ahci_sdev_attrs[];
* for ATA_BASE_SHT
*/
#define AHCI_SHT(drv_name) \
- ATA_NCQ_SHT(drv_name), \
+ __ATA_BASE_SHT(drv_name), \
.can_queue = AHCI_MAX_CMDS, \
.sg_tablesize = AHCI_MAX_SG, \
.dma_boundary = AHCI_DMA_BOUNDARY, \
.shost_attrs = ahci_shost_attrs, \
- .sdev_attrs = ahci_sdev_attrs
+ .sdev_attrs = ahci_sdev_attrs, \
+ .change_queue_depth = ata_scsi_change_queue_depth, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index cb69b737cb49..56b695136977 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
}
static const struct ata_port_info ahci_sunxi_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index d671d33ef287..c3a65ccd4b79 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -252,8 +252,9 @@ static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
}
static struct scsi_host_template atiixp_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations atiixp_port_ops = {
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index d09d432d3c44..247c14702624 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -95,8 +95,9 @@ static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
static struct scsi_host_template cs5520_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations cs5520_port_ops = {
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index a1b4aaccaa50..d5b7ac14e78f 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -147,8 +147,9 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
}
static struct scsi_host_template cs5530_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations cs5530_port_ops = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index e1486fe298ae..5b3a7a8ebef6 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -41,6 +41,10 @@ enum {
CY82_INDEX_TIMEOUT = 0x32
};
+static bool enable_dma = true;
+module_param(enable_dma, bool, 0);
+MODULE_PARM_DESC(enable_dma, "Enable bus master DMA operations");
+
/**
* cy82c693_set_piomode - set initial PIO mode data
* @ap: ATA interface
@@ -124,14 +128,16 @@ static struct ata_port_operations cy82c693_port_ops = {
static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- static const struct ata_port_info info = {
+ static struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
.port_ops = &cy82c693_port_ops
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+ if (enable_dma)
+ info.mwdma_mask = ATA_MWDMA2;
+
/* Devfn 1 is the ATA primary. The secondary is magic and on devfn2.
For the moment we don't handle the secondary. FIXME */
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index badab6708893..46208ececbb6 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -928,7 +928,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- err = -ENXIO;
+ err = irq;
goto err_rel_gpio;
}
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 27b0952fde6b..9d0dd8f4c21c 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -33,8 +33,6 @@
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
-#define ATA_HD_CONTROL 0x39
-
static struct scsi_host_template pata_falcon_sht = {
ATA_PIO_SHT(DRV_NAME),
};
@@ -121,23 +119,42 @@ static struct ata_port_operations pata_falcon_ops = {
static int __init pata_falcon_init_one(struct platform_device *pdev)
{
- struct resource *res;
+ struct resource *base_mem_res, *ctl_mem_res;
+ struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
void __iomem *base;
+ int irq = 0;
- dev_info(&pdev->dev, "Atari Falcon PATA controller\n");
+ dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ base_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (base_res && !devm_request_region(&pdev->dev, base_res->start,
+ resource_size(base_res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
+ return -EBUSY;
+ }
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), DRV_NAME)) {
+ ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (ctl_res && !devm_request_region(&pdev->dev, ctl_res->start,
+ resource_size(ctl_res), DRV_NAME)) {
dev_err(&pdev->dev, "resources busy\n");
return -EBUSY;
}
+ base_mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base_mem_res)
+ return -ENODEV;
+ if (!devm_request_mem_region(&pdev->dev, base_mem_res->start,
+ resource_size(base_mem_res), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
+ return -EBUSY;
+ }
+
+ ctl_mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!ctl_mem_res)
+ return -ENODEV;
+
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
@@ -147,10 +164,10 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->ops = &pata_falcon_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
- ap->flags |= ATA_FLAG_PIO_POLLING;
- base = (void __iomem *)res->start;
- ap->ioaddr.data_addr = base;
+ base = (void __iomem *)base_mem_res->start;
+ /* N.B. this assumes data_addr will be used for word-sized I/O only */
+ ap->ioaddr.data_addr = base + 0 + 0 * 4;
ap->ioaddr.error_addr = base + 1 + 1 * 4;
ap->ioaddr.feature_addr = base + 1 + 1 * 4;
ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
@@ -161,14 +178,25 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->ioaddr.status_addr = base + 1 + 7 * 4;
ap->ioaddr.command_addr = base + 1 + 7 * 4;
- ap->ioaddr.altstatus_addr = base + ATA_HD_CONTROL;
- ap->ioaddr.ctl_addr = base + ATA_HD_CONTROL;
+ base = (void __iomem *)ctl_mem_res->start;
+ ap->ioaddr.altstatus_addr = base + 1;
+ ap->ioaddr.ctl_addr = base + 1;
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", (unsigned long)base,
- (unsigned long)base + ATA_HD_CONTROL);
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+ (unsigned long)base_mem_res->start,
+ (unsigned long)ctl_mem_res->start);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_res && irq_res->start > 0) {
+ irq = irq_res->start;
+ } else {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
/* activate */
- return ata_host_activate(host, 0, NULL, 0, &pata_falcon_sht);
+ return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
+ IRQF_SHARED, &pata_falcon_sht);
}
static int __exit pata_falcon_remove_one(struct platform_device *pdev)
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index e47a28271f5b..be0ca8d5b345 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -914,7 +914,7 @@ static int pata_macio_do_resume(struct pata_macio_priv *priv)
#endif /* CONFIG_PM_SLEEP */
static struct scsi_host_template pata_macio_sht = {
- ATA_BASE_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MAX_DCMDS,
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
@@ -923,6 +923,9 @@ static struct scsi_host_template pata_macio_sht = {
*/
.max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
+ .sdev_attrs = ata_common_sdev_attrs,
+ .can_queue = ATA_DEF_QUEUE,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static struct ata_port_operations pata_macio_ops = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index bd87476ab481..b5a3f710d76d 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -898,10 +898,11 @@ static int octeon_cf_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq_handler = octeon_cf_interrupt;
i = platform_get_irq(dma_dev, 0);
- if (i > 0)
+ if (i > 0) {
irq = i;
+ irq_handler = octeon_cf_interrupt;
+ }
}
of_node_put(dma_node);
}
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 479c4b29b856..2e110aefe59b 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -115,10 +115,10 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENOENT;
- }
+ if (irq < 0)
+ return irq;
+ if (!irq)
+ return -EINVAL;
gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
if (IS_ERR(gpiod)) {
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 3b8c111140bd..f28daf62a37d 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -193,8 +193,9 @@ static int sc1200_qc_defer(struct ata_queued_cmd *qc)
}
static struct scsi_host_template sc1200_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations sc1200_port_ops = {
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 7511e11eef4d..b602e303fb54 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -253,8 +253,9 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
}
static struct scsi_host_template serverworks_osb4_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
};
static struct scsi_host_template serverworks_csb_sht = {
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index d55ee244d693..e5838b23c9e0 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -313,7 +313,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
intr_coalescing_count, intr_coalescing_ticks);
- DPRINTK("ICC register status: (hcr base: 0x%x) = 0x%x\n",
+ DPRINTK("ICC register status: (hcr base: %p) = 0x%x\n",
hcr_base, ioread32(hcr_base + ICC));
}
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 64b2ef15ec19..8440203e835e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -469,10 +469,12 @@ static int ahci_highbank_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
dev_err(dev, "no irq\n");
- return -EINVAL;
+ return irq;
}
+ if (!irq)
+ return -EINVAL;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c8867c12c0b8..9d86203e1e7a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -666,10 +666,14 @@ static struct scsi_host_template mv5_sht = {
};
#endif
static struct scsi_host_template mv6_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .slave_configure = ata_scsi_slave_config
};
static struct ata_port_operations mv5_ops = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 20190f66ced9..c385d18ce87b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -375,19 +375,25 @@ static struct scsi_host_template nv_sht = {
};
static struct scsi_host_template nv_adma_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
.slave_configure = nv_adma_slave_config,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static struct scsi_host_template nv_swncq_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = nv_swncq_slave_config,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
/*
@@ -2118,7 +2124,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
*/
lack_dhfis = 1;
- DPRINTK("id 0x%x QC: qc_active 0x%x,"
+ DPRINTK("id 0x%x QC: qc_active 0x%llx,"
"SWNCQ:qc_active 0x%X defer_bits %X "
"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
ap->print_id, ap->qc_active, pp->qc_active,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 560070d4f1d0..06a1e27c4f84 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -374,11 +374,14 @@ static struct pci_driver sil24_pci_driver = {
};
static struct scsi_host_template sil24_sht = {
- ATA_NCQ_SHT(DRV_NAME),
+ __ATA_BASE_SHT(DRV_NAME),
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
+ .sdev_attrs = ata_ncq_sdev_attrs,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .slave_configure = ata_scsi_slave_config
};
static struct ata_port_operations sil24_ops = {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4a8bf8cda52b..54ba506e5a89 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -150,7 +150,7 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
fwnode_links_purge_consumers(fwnode);
}
-static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
@@ -164,6 +164,7 @@ static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
fwnode_for_each_available_child_node(fwnode, child)
fw_devlink_purge_absent_suppliers(child);
}
+EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -193,6 +194,17 @@ int device_links_read_lock_held(void)
{
return srcu_read_lock_held(&device_links_srcu);
}
+
+static void device_link_synchronize_removal(void)
+{
+ synchronize_srcu(&device_links_srcu);
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);
@@ -223,6 +235,16 @@ int device_links_read_lock_held(void)
return lockdep_is_held(&device_links_lock);
}
#endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+}
#endif /* !CONFIG_SRCU */
static bool device_is_ancestor(struct device *dev, struct device *target)
@@ -444,8 +466,13 @@ static struct attribute *devlink_attrs[] = {
};
ATTRIBUTE_GROUPS(devlink);
-static void device_link_free(struct device_link *link)
+static void device_link_release_fn(struct work_struct *work)
{
+ struct device_link *link = container_of(work, struct device_link, rm_work);
+
+ /* Ensure that all references to the link object have been dropped. */
+ device_link_synchronize_removal();
+
while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);
@@ -454,24 +481,19 @@ static void device_link_free(struct device_link *link)
kfree(link);
}
-#ifdef CONFIG_SRCU
-static void __device_link_free_srcu(struct rcu_head *rhead)
-{
- device_link_free(container_of(rhead, struct device_link, rcu_head));
-}
-
static void devlink_dev_release(struct device *dev)
{
struct device_link *link = to_devlink(dev);
- call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
-}
-#else
-static void devlink_dev_release(struct device *dev)
-{
- device_link_free(to_devlink(dev));
+ INIT_WORK(&link->rm_work, device_link_release_fn);
+ /*
+ * It may take a while to complete this work because of the SRCU
+ * synchronization in device_link_release_fn(), and the consumer or
+ * supplier devices may get deleted while it runs, so put it into the
+ * "long" workqueue.
+ */
+ queue_work(system_long_wq, &link->rm_work);
}
-#endif
static struct class devlink_class = {
.name = "devlink",
@@ -845,7 +867,6 @@ out:
}
EXPORT_SYMBOL_GPL(device_link_add);
-#ifdef CONFIG_SRCU
static void __device_link_del(struct kref *kref)
{
struct device_link *link = container_of(kref, struct device_link, kref);
@@ -855,25 +876,9 @@ static void __device_link_del(struct kref *kref)
pm_runtime_drop_link(link);
- list_del_rcu(&link->s_node);
- list_del_rcu(&link->c_node);
- device_unregister(&link->link_dev);
-}
-#else /* !CONFIG_SRCU */
-static void __device_link_del(struct kref *kref)
-{
- struct device_link *link = container_of(kref, struct device_link, kref);
-
- dev_info(link->consumer, "Dropping the link to %s\n",
- dev_name(link->supplier));
-
- pm_runtime_drop_link(link);
-
- list_del(&link->s_node);
- list_del(&link->c_node);
+ device_link_remove_from_lists(link);
device_unregister(&link->link_dev);
}
-#endif /* !CONFIG_SRCU */
static void device_link_put_kref(struct device_link *link)
{
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b31b3af5c490..d5ffaab3cb61 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
struct zone *zone;
int ret;
- zone = page_zone(pfn_to_page(start_pfn));
-
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
- if (nr_vmemmap_pages)
+ if (nr_vmemmap_pages) {
+ zone = page_zone(pfn_to_page(start_pfn));
adjust_present_page_count(zone, -nr_vmemmap_pages);
+ }
ret = offline_pages(start_pfn + nr_vmemmap_pages,
nr_pages - nr_vmemmap_pages);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1fc1a992f90c..b570848d23e0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
+ dev->power.needs_force_resume = 0;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
* its parent, but set its status to RPM_SUSPENDED anyway in case this
* function will be called again for it in the meantime.
*/
- if (pm_runtime_need_not_resume(dev))
+ if (pm_runtime_need_not_resume(dev)) {
pm_runtime_set_suspended(dev);
- else
+ } else {
__update_runtime_status(dev, RPM_SUSPENDED);
+ dev->power.needs_force_resume = 1;
+ }
return 0;
@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+ if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
goto out;
/*
@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
pm_runtime_mark_last_busy(dev);
out:
+ dev->power.needs_force_resume = 0;
pm_runtime_enable(dev);
return ret;
}
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 50b1e2d06a25..159bac6c5046 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,8 +4,9 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO)
select IRQ_DOMAIN if REGMAP_IRQ
+ select MDIO_BUS if REGMAP_MDIO
bool
config REGCACHE_COMPRESSED
@@ -36,6 +37,9 @@ config REGMAP_W1
tristate
depends on W1
+config REGMAP_MDIO
+ tristate
+
config REGMAP_MMIO
tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 33f63adb5b3d..11facb32a027 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_REGMAP_SOUNDWIRE_MBQ) += regmap-sdw-mbq.o
obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o
obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o
+obj-$(CONFIG_REGMAP_MDIO) += regmap-mdio.o
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 62b95a9212ae..980e5ce6a3a3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -306,33 +306,64 @@ static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
+ const struct i2c_adapter_quirks *quirks;
+ const struct regmap_bus *bus = NULL;
+ struct regmap_bus *ret_bus;
+ u16 max_read = 0, max_write = 0;
+
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
- return &regmap_i2c;
+ bus = &regmap_i2c;
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
- return &regmap_i2c_smbus_i2c_block;
+ bus = &regmap_i2c_smbus_i2c_block;
else if (config->val_bits == 8 && config->reg_bits == 16 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
- return &regmap_i2c_smbus_i2c_block_reg16;
+ bus = &regmap_i2c_smbus_i2c_block_reg16;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
case REGMAP_ENDIAN_LITTLE:
- return &regmap_smbus_word;
+ bus = &regmap_smbus_word;
+ break;
case REGMAP_ENDIAN_BIG:
- return &regmap_smbus_word_swapped;
+ bus = &regmap_smbus_word_swapped;
+ break;
default: /* everything else is not supported */
break;
}
else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
- return &regmap_smbus_byte;
+ bus = &regmap_smbus_byte;
+
+ if (!bus)
+ return ERR_PTR(-ENOTSUPP);
+
+ quirks = i2c->adapter->quirks;
+ if (quirks) {
+ if (quirks->max_read_len &&
+ (bus->max_raw_read == 0 || bus->max_raw_read > quirks->max_read_len))
+ max_read = quirks->max_read_len;
+
+ if (quirks->max_write_len &&
+ (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+ max_write = quirks->max_write_len;
+
+ if (max_read || max_write) {
+ ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+ if (!ret_bus)
+ return ERR_PTR(-ENOMEM);
+ ret_bus->free_on_exit = true;
+ ret_bus->max_raw_read = max_read;
+ ret_bus->max_raw_write = max_write;
+ bus = ret_bus;
+ }
+ }
- return ERR_PTR(-ENOTSUPP);
+ return bus;
}
struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
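
The hunk above caps the raw transfer limits using the I2C adapter quirks: a quirk limit wins whenever the selected bus has no limit of its own or a larger one, and in that case the bus descriptor is duplicated so the caps apply only to this map (regmap_exit() later frees the copy via free_on_exit). A condensed restatement of the clamping rule, as a sketch only (helper name hypothetical):

    /* Returns the override to apply, or 0 to keep the bus default. */
    static u16 clamp_raw_limit(size_t bus_limit, u16 quirk_limit)
    {
            if (quirk_limit && (bus_limit == 0 || bus_limit > quirk_limit))
                    return quirk_limit;
            return 0;
    }
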
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 760296a4b606..d2656581a608 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -531,6 +531,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
}
+ if (chip->status_invert)
+ for (i = 0; i < data->chip->num_regs; i++)
+ data->status_buf[i] = ~data->status_buf[i];
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -800,6 +804,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
+ if (chip->status_invert)
+ d->status_buf[i] = ~d->status_buf[i];
+
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = sub_irq_reg(d, d->chip->ack_base, i);
if (chip->ack_invert)
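
The two hunks above add handling for a new status_invert flag: status registers are complemented both in the interrupt thread and when clearing pre-existing status at registration time, for hardware whose status bits read back active-low. A minimal sketch of a chip description using the flag (all names and register offsets hypothetical):

    static const struct regmap_irq example_irqs[] = {
            REGMAP_IRQ_REG(0, 0, BIT(0)),
    };

    static const struct regmap_irq_chip example_irq_chip = {
            .name          = "example",
            .status_base   = 0x10,
            .mask_base     = 0x14,
            .num_regs      = 1,
            .irqs          = example_irqs,
            .num_irqs      = ARRAY_SIZE(example_irqs),
            .status_invert = true,  /* status bits read back inverted */
    };
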
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
new file mode 100644
index 000000000000..6a20201299f5
--- /dev/null
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/errno.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define REGVAL_MASK GENMASK(15, 0)
+#define REGNUM_C22_MASK GENMASK(4, 0)
+/* Clause-45 mask includes the device type (5 bit) and actual register number (16 bit) */
+#define REGNUM_C45_MASK GENMASK(20, 0)
+
+static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int *val)
+{
+ int ret;
+
+ ret = mdiobus_read(mdio_dev->bus, mdio_dev->addr, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & REGVAL_MASK;
+ return 0;
+}
+
+static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
+{
+ return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+}
+
+static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_read(mdio_dev, reg, val);
+}
+
+static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
+ return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+}
+
+static const struct regmap_bus regmap_mdio_c22_bus = {
+ .reg_write = regmap_mdio_c22_write,
+ .reg_read = regmap_mdio_c22_read,
+};
+
+static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C45_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_read(mdio_dev, MII_ADDR_C45 | reg, val);
+}
+
+static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C45_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_write(mdio_dev, MII_ADDR_C45 | reg, val);
+}
+
+static const struct regmap_bus regmap_mdio_c45_bus = {
+ .reg_write = regmap_mdio_c45_write,
+ .reg_read = regmap_mdio_c45_read,
+};
+
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config, struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus;
+
+ if (config->reg_bits == 5 && config->val_bits == 16)
+ bus = &regmap_mdio_c22_bus;
+ else if (config->reg_bits == 21 && config->val_bits == 16)
+ bus = &regmap_mdio_c45_bus;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mdio);
+
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config, struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus;
+
+ if (config->reg_bits == 5 && config->val_bits == 16)
+ bus = &regmap_mdio_c22_bus;
+ else if (config->reg_bits == 21 && config->val_bits == 16)
+ bus = &regmap_mdio_c45_bus;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __devm_regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
+
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_LICENSE("GPL v2");
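
The bus selection above keys off regmap_config: reg_bits == 5 selects the Clause-22 bus and reg_bits == 21 the Clause-45 bus (5-bit device type plus 16-bit register number, sent with MII_ADDR_C45 set), with val_bits fixed at 16 in both cases. A minimal usage sketch for a Clause-22 device, assuming the devm_regmap_init_mdio() wrapper declared in <linux/regmap.h> by this series and an mdio_device supplied by the probing driver:

    static const struct regmap_config phy_c22_regmap_config = {
            .reg_bits     = 5,      /* Clause-22 register number */
            .val_bits     = 16,     /* MDIO registers are 16 bits wide */
            .max_register = 0x1f,
    };

    struct regmap *map = devm_regmap_init_mdio(mdiodev, &phy_c22_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);
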
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 297e95be25b3..fe3e38dd5324 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -243,6 +243,16 @@ static void regmap_format_7_9_write(struct regmap *map,
*out = cpu_to_be16((reg << 9) | val);
}
+static void regmap_format_7_17_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = val >> 8;
+ out[0] = (val >> 16) | (reg << 1);
+}
+
static void regmap_format_10_14_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -885,6 +895,9 @@ struct regmap *__regmap_init(struct device *dev,
case 9:
map->format.format_write = regmap_format_7_9_write;
break;
+ case 17:
+ map->format.format_write = regmap_format_7_17_write;
+ break;
default:
goto err_hwlock;
}
@@ -1496,6 +1509,8 @@ void regmap_exit(struct regmap *map)
mutex_destroy(&map->mutex);
kfree_const(map->name);
kfree(map->patch);
+ if (map->bus && map->bus->free_on_exit)
+ kfree(map->bus);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
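
A brief worked example of the new 7/17 write format added above (values chosen arbitrarily for illustration): with reg = 0x2A and val = 0x1ABCD, the bytes placed in the work buffer are

    /*
     * out[0] = (val >> 16) | (reg << 1) = 0x01 | 0x54 = 0x55
     * out[1] = (u8)(val >> 8)           = 0xAB
     * out[2] = (u8)val                  = 0xCD
     */

so the 7-bit register number occupies the top seven bits of the first byte and the 17-bit value fills the remaining bits.
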
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 3cc11b813f28..d1f1a8240120 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -1045,7 +1045,15 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
}
set_secondary_fwnode(dev, &swnode->fwnode);
- software_node_notify(dev, KOBJ_ADD);
+
+ /*
+ * If the device has been fully registered by the time this function is
+ * called, software_node_notify() must be called separately so that the
+ * symlinks get created and the reference count of the node is kept in
+ * balance.
+ */
+ if (device_is_registered(dev))
+ software_node_notify(dev, KOBJ_ADD);
return 0;
}
@@ -1065,7 +1073,8 @@ void device_remove_software_node(struct device *dev)
if (!swnode)
return;
- software_node_notify(dev, KOBJ_REMOVE);
+ if (device_is_registered(dev))
+ software_node_notify(dev, KOBJ_REMOVE);
set_secondary_fwnode(dev, NULL);
kobject_put(&swnode->kobj);
}
@@ -1119,8 +1128,7 @@ int software_node_notify(struct device *dev, unsigned long action)
switch (action) {
case KOBJ_ADD:
- ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
- "software_node");
+ ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
if (ret)
break;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d58d68f3c7cd..76e12f3482a9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1879,29 +1879,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- /*
- * take loop_ctl_mutex to protect lo pointer from race with
- * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
- * release it prior to updating lo->lo_refcnt.
- */
- err = mutex_lock_killable(&loop_ctl_mutex);
- if (err)
- return err;
- lo = bdev->bd_disk->private_data;
- if (!lo) {
- mutex_unlock(&loop_ctl_mutex);
- return -ENXIO;
- }
err = mutex_lock_killable(&lo->lo_mutex);
- mutex_unlock(&loop_ctl_mutex);
if (err)
return err;
- atomic_inc(&lo->lo_refcnt);
+ if (lo->lo_state == Lo_deleting)
+ err = -ENXIO;
+ else
+ atomic_inc(&lo->lo_refcnt);
mutex_unlock(&lo->lo_mutex);
- return 0;
+ return err;
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2285,7 +2274,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
mutex_unlock(&lo->lo_mutex);
break;
}
- lo->lo_disk->private_data = NULL;
+ lo->lo_state = Lo_deleting;
mutex_unlock(&lo->lo_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index a3c04f310672..5beb959b94d3 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -22,6 +22,7 @@ enum {
Lo_unbound,
Lo_bound,
Lo_rundown,
+ Lo_deleting,
};
struct loop_func_table;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4ff71b579cfc..45d2c28c8fc8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* config ref and try to destroy the workqueue from inside the work
* queue.
*/
- flush_workqueue(nbd->recv_workq);
+ if (nbd->recv_workq)
+ flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -2014,12 +2015,11 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
- nbd_put(nbd);
- return 0;
- }
+ if (!refcount_inc_not_zero(&nbd->config_refs))
+ goto put_nbd;
nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
+put_nbd:
nbd_put(nbd);
return 0;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5d603ef39bad..7f6ba2c975ed 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
@@ -2527,10 +2529,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
}
btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
- err = request_firmware(&fw, fwname, &hdev->dev);
+ err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
fwname, err);
+
return err;
}
@@ -2680,12 +2689,24 @@ download:
err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
sizeof(fwname), "sfi");
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
- err = request_firmware(&fw, fwname, &hdev->dev);
+ err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
fwname, err);
return err;
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 7c810f02a2ef..b3357a8a2fdb 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -311,8 +311,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
- MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
- MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -708,7 +708,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- del_timer(&mhi_pdev->health_check_timer);
+ del_timer_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -935,9 +935,43 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
return ret;
}
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ /* We want to stop all operations because hibernation does not guarantee
+ * that the device will be in the same state as before freezing,
+ * especially if the intermediate restore kernel reinitializes the MHI
+ * device with a new context.
+ */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+ /* Reinitialize the device */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return 0;
+}
+
static const struct dev_pm_ops mhi_pci_pm_ops = {
SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = mhi_pci_suspend,
+ .resume = mhi_pci_resume,
+ .freeze = mhi_pci_freeze,
+ .thaw = mhi_pci_restore,
+ .restore = mhi_pci_restore,
+#endif
};
static struct pci_driver mhi_pci_driver = {
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 5fae60f8c135..38cb116ed433 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1334,6 +1334,34 @@ err_allow_idle:
return error;
}
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+ struct device *dev = ddata->dev;
+ int error;
+
+ /* Disable target module if it is enabled */
+ if (ddata->enabled) {
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+ }
+
+ /* Enable target module */
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "reinit resume failed: %i\n", error);
+
+ if (leave_enabled)
+ return error;
+
+ /* Disable target module if no leave_enabled was set */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+ return error;
+}
+
static int __maybe_unused sysc_noirq_suspend(struct device *dev)
{
struct sysc *ddata;
@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
- return pm_runtime_force_suspend(dev);
+ if (!ddata->enabled)
+ return 0;
+
+ ddata->needs_resume = 1;
+
+ return sysc_runtime_suspend(dev);
}
static int __maybe_unused sysc_noirq_resume(struct device *dev)
{
struct sysc *ddata;
+ int error = 0;
ddata = dev_get_drvdata(dev);
@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
- return pm_runtime_force_resume(dev);
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+ error = sysc_reinit_module(ddata, ddata->needs_resume);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ } else if (ddata->needs_resume) {
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ }
+
+ ddata->needs_resume = 0;
+
+ return error;
}
static const struct dev_pm_ops sysc_pm_ops = {
@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1459,6 +1505,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -1466,7 +1514,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_QUIRK_REINIT_ON_RESUME),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* PRUSS on am3, am4 and am5 */
@@ -1524,7 +1573,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 742b4a0932e3..c6d8c0f59722 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -744,6 +744,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
static int probe_gdrom(struct platform_device *devptr)
{
int err;
+
+ /*
+ * Ensure our "one" device is initialized properly in case of previous
+ * usages of it
+ */
+ memset(&gd, 0, sizeof(gd));
+
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
pr_warn("ATA Probe for GDROM failed\n");
@@ -830,6 +837,8 @@ static int remove_gdrom(struct platform_device *devptr)
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
+ kfree(gd.cd_info);
+ kfree(gd.toc);
return 0;
}
@@ -845,7 +854,7 @@ static struct platform_driver gdrom_driver = {
static int __init init_gdrom(void)
{
int rc;
- gd.toc = NULL;
+
rc = platform_driver_register(&gdrom_driver);
if (rc)
return rc;
@@ -861,8 +870,6 @@ static void __exit exit_gdrom(void)
{
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
- kfree(gd.toc);
- kfree(gd.cd_info);
}
module_init(init_gdrom);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ed3b7dab678d..8b55085650ad 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index eff1f12d981a..c84d23951219 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
if (nr_commands !=
be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+ rc = -EFAULT;
tpm_buf_destroy(&buf);
goto out;
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index a2e0395cbe61..55b9d3965ae1 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
cap_t cap;
int ret;
- /* TPM 2.0 */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
-
- /* TPM 1.2 */
ret = request_locality(chip, 0);
if (ret < 0)
return ret;
- ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ else
+ ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
release_locality(chip, 0);
@@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
if (ret)
return ret;
- /* TPM 1.2 requires self-test on resume. This function actually returns
+ /*
+ * TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ ret = request_locality(chip, 0);
+ if (ret < 0)
+ return ret;
+
tpm1_do_selftest(chip);
+ release_locality(chip, 0);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e2ec1b745243..65508eb89ec9 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4540,6 +4540,9 @@ int of_clk_add_provider(struct device_node *np,
struct of_clk_provider *cp;
int ret;
+ if (!np)
+ return 0;
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -4579,6 +4582,9 @@ int of_clk_add_hw_provider(struct device_node *np,
struct of_clk_provider *cp;
int ret;
+ if (!np)
+ return 0;
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -4676,6 +4682,9 @@ void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
+ if (!np)
+ return;
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 977fd05ac35f..d6ece7bbce89 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -419,7 +419,7 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
}
-#ifdef VDSO_CLOCKMODE_HVCLOCK
+#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
@@ -435,7 +435,7 @@ static struct clocksource hyperv_cs_tsc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend= suspend_hv_clock_tsc,
.resume = resume_hv_clock_tsc,
-#ifdef VDSO_CLOCKMODE_HVCLOCK
+#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
.enable = hv_cs_enable,
.vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
#else
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a5c5f70acfc9..e65e0a43be64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
If in doubt, say N.
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d1bbc16fba4b..7e7450453714 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}
- highest_perf = perf_caps.highest_perf;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ highest_perf = amd_get_highest_perf();
+ else
+ highest_perf = perf_caps.highest_perf;
+
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 3848b4c222e1..2f769b1630c5 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -10,18 +10,14 @@
#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
-#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
#include <asm/unaligned.h>
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
}
};
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
- int cpu;
- struct irq_work irq_work;
- struct kthread_work work;
- struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
- struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
- struct cppc_freq_invariance *cppc_fi;
- struct cppc_perf_fb_ctrs fb_ctrs = {0};
- struct cppc_cpudata *cpu_data;
- unsigned long local_freq_scale;
- u64 perf;
-
- cppc_fi = container_of(work, struct cppc_freq_invariance, work);
- cpu_data = cppc_fi->cpu_data;
-
- if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
- pr_warn("%s: failed to read perf counters\n", __func__);
- return;
- }
-
- cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
- perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
- fb_ctrs);
-
- perf <<= SCHED_CAPACITY_SHIFT;
- local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
- if (WARN_ON(local_freq_scale > 1024))
- local_freq_scale = 1024;
-
- per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
- struct cppc_freq_invariance *cppc_fi;
-
- cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
- kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
- struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
- /*
- * cppc_get_perf_ctrs() can potentially sleep, call that from the right
- * context.
- */
- irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
- .source = SCALE_FREQ_SOURCE_CPPC,
- .set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
- struct cppc_cpudata *cpu_data)
-{
- struct cppc_perf_fb_ctrs fb_ctrs = {0};
- struct cppc_freq_invariance *cppc_fi;
- int i, ret;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- for_each_cpu(i, policy->cpus) {
- cppc_fi = &per_cpu(cppc_freq_inv, i);
- cppc_fi->cpu = i;
- cppc_fi->cpu_data = cpu_data;
- kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
- init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
- ret = cppc_get_perf_ctrs(i, &fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters: %d\n",
- __func__, ret);
- fie_disabled = true;
- } else {
- cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
- }
- }
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
- struct sched_attr attr = {
- .size = sizeof(struct sched_attr),
- .sched_policy = SCHED_DEADLINE,
- .sched_nice = 0,
- .sched_priority = 0,
- /*
- * Fake (unused) bandwidth; workaround to "fix"
- * priority inheritance.
- */
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
- };
- int ret;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- kworker_fie = kthread_create_worker(0, "cppc_fie");
- if (IS_ERR(kworker_fie))
- return;
-
- ret = sched_setattr_nocheck(kworker_fie->task, &attr);
- if (ret) {
- pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
- ret);
- kthread_destroy_worker(kworker_fie);
- return;
- }
-
- /* Register for freq-invariance */
- topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
- struct cppc_freq_invariance *cppc_fi;
- int i;
-
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
- return;
-
- if (fie_disabled)
- return;
-
- topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
- for_each_possible_cpu(i) {
- cppc_fi = &per_cpu(cppc_freq_inv, i);
- irq_work_sync(&cppc_fi->irq_work);
- }
-
- kthread_destroy_worker(kworker_fie);
- kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
- struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
- if (ret) {
+ if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->highest_perf, cpu, ret);
- } else {
- cppc_freq_invariance_policy_init(policy, cpu_data);
- }
return ret;
}
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
- u64 reference_perf;
+ u64 reference_perf, delivered_perf;
reference_perf = fb_ctrs_t0.reference_perf;
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
delta_delivered = get_delta(fb_ctrs_t1.delivered,
fb_ctrs_t0.delivered);
- /* Check to avoid divide-by zero and invalid delivered_perf */
- if (!delta_reference || !delta_delivered)
- return cpu_data->perf_ctrls.desired_perf;
-
- return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs fb_ctrs_t0,
- struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
- u64 delivered_perf;
-
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
- fb_ctrs_t1);
+ /* Check to avoid divide-by zero */
+ if (delta_reference || delta_delivered)
+ delivered_perf = (reference_perf * delta_delivered) /
+ delta_reference;
+ else
+ delivered_perf = cpu_data->perf_ctrls.desired_perf;
return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}
@@ -718,8 +504,6 @@ static void cppc_check_hisi_workaround(void)
static int __init cppc_cpufreq_init(void)
{
- int ret;
-
if ((acpi_disabled) || !acpi_cpc_valid())
return -ENODEV;
@@ -727,11 +511,7 @@ static int __init cppc_cpufreq_init(void)
cppc_check_hisi_workaround();
- ret = cpufreq_register_driver(&cppc_cpufreq_driver);
- if (!ret)
- cppc_freq_invariance_init();
-
- return ret;
+ return cpufreq_register_driver(&cppc_cpufreq_driver);
}
static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
static void __exit cppc_cpufreq_exit(void)
{
- cppc_freq_invariance_exit();
cpufreq_unregister_driver(&cppc_cpufreq_driver);
free_cpu_data();
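
[editor's note] The reworked cppc_get_rate_from_fbctrs() above folds the old helper back in: delivered performance is reference_perf * delta_delivered / delta_reference, with a fallback to the previously requested performance level when either delta is zero. A minimal standalone sketch of that arithmetic (plain C, hypothetical types, not the driver code):

#include <stdint.h>
#include <stdio.h>

struct fb_ctrs {			/* hypothetical stand-in for cppc_perf_fb_ctrs */
	uint64_t reference;
	uint64_t delivered;
	uint64_t reference_perf;
};

/* delivered_perf = reference_perf * d(delivered) / d(reference), with a
 * divide-by-zero fallback to the previously requested performance level. */
static uint64_t delivered_perf(struct fb_ctrs t0, struct fb_ctrs t1,
			       uint64_t desired_perf)
{
	uint64_t d_ref = t1.reference - t0.reference;
	uint64_t d_del = t1.delivered - t0.delivered;

	if (!d_ref || !d_del)
		return desired_perf;

	return t0.reference_perf * d_del / d_ref;
}

int main(void)
{
	struct fb_ctrs t0 = { 1000,  800, 100 };
	struct fb_ctrs t1 = { 2000, 1600, 100 };

	/* delivered counter advanced at 80% of the reference rate: prints 80 */
	printf("%llu\n", (unsigned long long)delivered_perf(t0, t1, 100));
	return 0;
}
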
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f0401064d7aa..0e69dffd5a76 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3033,6 +3033,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
{}
};
+static bool intel_pstate_hwp_is_enabled(void)
+{
+ u64 value;
+
+ rdmsrl(MSR_PM_ENABLE, value);
+ return !!(value & 0x1);
+}
+
static int __init intel_pstate_init(void)
{
const struct x86_cpu_id *id;
@@ -3051,8 +3059,12 @@ static int __init intel_pstate_init(void)
* Avoid enabling HWP for processors without EPP support,
* because that means incomplete HWP implementation which is a
* corner case and supporting it is generally problematic.
+ *
+ * If HWP is enabled already, though, there is no choice but to
+ * deal with it.
*/
- if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
+ intel_pstate_hwp_is_enabled()) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
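
[editor's note] intel_pstate_hwp_is_enabled() above checks bit 0 of MSR_PM_ENABLE (the HWP enable bit), so a platform where HWP was already switched on, for example by firmware, is handled even when EPP support is absent. A kernel-style sketch of that kind of MSR test (not standalone; rdmsrl() and MSR_PM_ENABLE come from the x86 MSR headers):

#include <linux/types.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

/* True if IA32_PM_ENABLE[0] (HWP enable) is already set; per the comment in
 * the patch, once it is set the driver has no choice but to deal with HWP
 * being active. */
static bool hwp_already_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);
	return value & 0x1;
}
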
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index facc8e6bc580..d385daf2c71c 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -442,7 +442,6 @@ static int nitrox_probe(struct pci_dev *pdev,
err = pci_request_mem_regions(pdev, nitrox_driver_name);
if (err) {
pci_disable_device(pdev);
- dev_err(&pdev->dev, "Failed to request mem regions!\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f264b70c383e..eadd1eaa2fb5 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
- ret = dma_buf_pin(attach);
+ ret = dmabuf->ops->pin(attach);
if (ret)
goto err_unlock;
}
@@ -786,7 +786,7 @@ err_attach:
err_unpin:
if (dma_buf_is_dynamic(attach->dmabuf))
- dma_buf_unpin(attach);
+ dmabuf->ops->unpin(attach);
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
__unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_buf_unpin(attach);
+ dmabuf->ops->unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
}
}
@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = dma_buf_pin(attach);
+ r = attach->dmabuf->ops->pin(attach);
if (r)
return ERR_PTR(r);
}
@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ attach->dmabuf->ops->unpin(attach);
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ab9d9a488a6..39b5b46e880f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -59,6 +59,7 @@ config DMA_OF
#devices
config ALTERA_MSGDMA
tristate "Altera / Intel mSGDMA Engine"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
config XILINX_ZYNQMP_DPDMA
tristate "Xilinx DPDMA Engine"
+ depends on HAS_IOMEM && OF
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ec909e0b810..4ae057922ef1 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI major version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
}
if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ err = -EINVAL;
dev_err(dev, "DPDMAI minor version mismatch\n"
"Found %u.%u, supported version is %u.%u\n",
priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
ppriv->store =
dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
if (!ppriv->store) {
+ err = -ENOMEM;
dev_err(dev, "dpaa2_io_store_create() failed\n");
goto err_store;
}
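
[editor's note] The dpaa2-qdma hunks above all fix the same bug class: the error path jumped to the cleanup label with err still holding 0 from an earlier successful call, so probe reported success after a failure. A standalone sketch of the pattern, with hypothetical names:

#include <errno.h>
#include <stdio.h>

struct store;				/* hypothetical resource type */

static struct store *store_create(void)
{
	return NULL;			/* simulate an allocation failure */
}

static int probe_step(void)
{
	struct store *store;
	int err = 0;			/* 0 left over from earlier success */

	store = store_create();
	if (!store) {
		err = -ENOMEM;		/* without this line the function
					 * would "fail" while returning 0 */
		goto out;
	}

	/* ... use store ... */
	return 0;

out:
	return err;
}

int main(void)
{
	printf("probe_step() = %d\n", probe_step());
	return 0;
}
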
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 302cba5ff779..d4419bf1fede 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
+ rc = -EINVAL;
goto failed;
}
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2a926bef87f2..442d55c11a5f 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+
+ msixcnt = pci_msix_vec_count(pdev);
+ if (msixcnt <= 0)
+ return;
+
+ irq_entry = &idxd->irq_entries[0];
+ free_irq(irq_entry->vector, irq_entry);
+
+ for (i = 1; i < msixcnt; i++) {
+
+ irq_entry = &idxd->irq_entries[i];
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+ idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+ IDXD_IRQ_MSIX);
+ free_irq(irq_entry->vector, irq_entry);
+ }
+
+ idxd_mask_error_interrupts(idxd);
+ pci_free_irq_vectors(pdev);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine->idxd = idxd;
device_initialize(&engine->conf_dev);
engine->conf_dev.parent = &idxd->conf_dev;
+ engine->conf_dev.bus = &dsa_bus_type;
engine->conf_dev.type = &idxd_engine_device_type;
rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
@@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++)
+ put_device(&idxd->groups[i]->conf_dev);
+ for (i = 0; i < idxd->max_engines; i++)
+ put_device(&idxd->engines[i]->conf_dev);
+ for (i = 0; i < idxd->max_wqs; i++)
+ put_device(&idxd->wqs[i]->conf_dev);
+ destroy_workqueue(idxd->wq);
+}
+
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "Loading RO device config\n");
rc = idxd_device_load_config(idxd);
if (rc < 0)
- goto err;
+ goto err_config;
}
rc = idxd_setup_interrupts(idxd);
if (rc)
- goto err;
+ goto err_config;
dev_dbg(dev, "IDXD interrupt setup complete.\n");
@@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
return 0;
+ err_config:
+ idxd_cleanup_internals(idxd);
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
@@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
return rc;
}
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
+ idxd_cleanup_internals(idxd);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
- goto err;
+ goto err_dev_register;
}
idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+ err_dev_register:
+ idxd_cleanup(idxd);
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
@@ -745,12 +801,12 @@ static int __init idxd_init_module(void)
* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We can not utilize it.
*/
- if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+ if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
pr_warn("idxd driver failed to load without MOVDIR64B.\n");
return -ENODEV;
}
- if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+ if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
pr_warn("Platform does not have ENQCMD(S) support.\n");
else
support_enqcmd = true;
@@ -787,6 +843,7 @@ module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
+ idxd_unregister_driver();
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 0d5c42f7bfa4..97d9a6f04f2a 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -230,7 +230,7 @@ out:
}
/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
* @source: interrupt source bit position (see ipu_irq_map())
* @return: 0 or negative error code
*/
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 27c07350971d..375e7e647df6 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
- struct dma_chan *chan = vd->tx.chan;
- struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
- kfree(c->desc);
+ kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
- struct mtk_uart_apdma_desc *d = c->desc;
-
mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
}
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
c->rx_status = d->avail_len - cnt;
mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+ struct mtk_uart_apdma_desc *d = c->desc;
+
+ if (d) {
+ list_del(&d->vd.node);
+ vchan_cookie_complete(&d->vd);
+ c->desc = NULL;
+ }
}
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
mtk_uart_apdma_rx_handler(c);
else if (c->dir == DMA_MEM_TO_DEV)
mtk_uart_apdma_tx_handler(c);
+ mtk_uart_apdma_chan_complete_handler(c);
spin_unlock_irqrestore(&c->vc.lock, flags);
return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
return NULL;
/* Now allocate and setup the descriptor */
- d = kzalloc(sizeof(*d), GFP_ATOMIC);
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
if (!d)
return NULL;
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc)) {
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
vd = vchan_next_desc(&c->vc);
c->desc = to_mtk_uart_apdma_desc(&vd->tx);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fd8d2bc3be9f..110de8a60058 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
+ unsigned long iflags;
+
dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, iflags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
return NULL;
}
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 365f94eb3b08..3f926a653bd8 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
config QCOM_HIDMA_MGMT
tristate "Qualcomm Technologies HIDMA Management support"
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for the Qualcomm Technologies HIDMA Management.
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 806ca02c52d7..62026607f3f8 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- return platform_driver_register(&hidma_mgmt_driver);
+ /*
+ * We do not check for return value here, as it is assumed that
+ * platform_driver_register must not fail. The reason for this is that
+ * the (potential) hidma_mgmt_of_populate_channels calls above are not
+ * cleaned up if it does fail, and to do this work is quite
+ * complicated. In particular, various calls of of_address_to_resource,
+ * of_irq_to_resource, platform_device_register_full, of_dma_configure,
+ * and of_msi_configure which then call other functions and so on, must
+ * be cleaned up - this is not a trivial exercise.
+ *
+ * Currently, this module is not intended to be unloaded, and there is
+ * no module_exit function defined which does the needed cleanup. For
+ * this reason, we have to assume success here.
+ */
+ platform_driver_register(&hidma_mgmt_driver);
+ return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
index f8ffa02e279f..ba46a0a15a93 100644
--- a/drivers/dma/sf-pdma/Kconfig
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -1,5 +1,6 @@
config SF_PDMA
tristate "Sifive PDMA controller driver"
+ depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index d530c1bf11d9..6885b3dcd7a9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
return ret;
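
[editor's note] Several drivers in this series (rcar-dmac here, stm32-mdma and zynqmp_dma below) switch from pm_runtime_get_sync() to pm_runtime_resume_and_get(). The difference is only in error handling: pm_runtime_get_sync() leaves the usage counter incremented even when the resume fails, so callers must drop it themselves, whereas pm_runtime_resume_and_get() does that for them. A kernel-style sketch of the roughly equivalent open-coded form:

#include <linux/pm_runtime.h>

/* Roughly what pm_runtime_resume_and_get() does: on failure the reference
 * taken by pm_runtime_get_sync() is dropped again, so the caller can simply
 * return the error without leaking a usage count. */
static int foo_runtime_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
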
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 265d7c07b348..e1827393143f 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
kfree(base->lcla_pool.base_unaligned);
+ if (base->lcpa_base)
+ iounmap(base->lcpa_base);
+
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 36ba8b43e78d..18cbd1e43c2e 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
return -ENOMEM;
}
- ret = pm_runtime_get_sync(dmadev->ddev.dev);
+ ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
if (ret < 0)
return ret;
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
u32 ccr, id;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 70b29bd079c9..6c709803203a 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -113,6 +113,7 @@
#define XILINX_DPDMA_CH_VDO 0x020
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
#define XILINX_DPDMA_CH_DESC_ID 0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
* will be used, but it should be enough.
*/
list_for_each_entry(sw_desc, &desc->descriptors, node)
- sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+ sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
sw_desc = list_first_entry(&desc->descriptors,
struct xilinx_dpdma_sw_desc, node);
@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
if (!chan->running || !pending)
goto out;
- desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+ desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+ & XILINX_DPDMA_CH_DESC_ID_MASK;
/* If the retrigger raced with vsync, retry at the next frame. */
sw_desc = list_first_entry(&pending->descriptors,
@@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
*/
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
- dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}
@@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+ void __iomem *reg;
+
+ /* Disable all interrupts */
+ xilinx_dpdma_disable_irq(xdev);
+
+ /* Stop all channels */
+ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+ reg = xdev->reg + XILINX_DPDMA_CH_BASE
+ + XILINX_DPDMA_CH_OFFSET * i;
+ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+ }
+
+ /* Clear the interrupt status registers */
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
static int xilinx_dpdma_probe(struct platform_device *pdev)
{
struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
if (IS_ERR(xdev->reg))
return PTR_ERR(xdev->reg);
+ dpdma_hw_init(xdev);
+
xdev->irq = platform_get_irq(pdev, 0);
if (xdev->irq < 0) {
dev_err(xdev->dev, "failed to get platform irq\n");
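
[editor's note] The DPDMA descriptor ID register only holds a 16-bit value, so the monotonically growing cookie is masked with XILINX_DPDMA_CH_DESC_ID_MASK both when it is written into the descriptor and when it is compared against what the hardware reports; the new dpdma_hw_init() additionally disables interrupts, stops the channels and clears pending interrupt status before the driver takes over. A tiny standalone sketch of the masking idea:

#include <stdint.h>
#include <stdio.h>

#define DESC_ID_MASK 0xffffU		/* GENMASK(15, 0) */

int main(void)
{
	uint32_t cookie = 0x12345;	/* monotonically increasing cookie */
	uint32_t desc_id = cookie & DESC_ID_MASK;

	/* the hardware only ever sees and reports the low 16 bits */
	printf("cookie 0x%x -> desc_id 0x%x\n", cookie, desc_id);
	return 0;
}
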
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d8419565b92c..5fecf5aa6e85 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
struct zynqmp_dma_desc_sw *desc;
int i, ret;
- ret = pm_runtime_get_sync(chan->dev);
+ ret = pm_runtime_resume_and_get(chan->dev);
if (ret < 0)
return ret;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9fa4dfc6ebee..f0d8f60acee1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3083,7 +3083,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_K8_SYSCFG, msr_val);
+ rdmsrl(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 5dd905a3f30c..27d56920b469 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -323,6 +323,21 @@ static const char * const smca_umc_mce_desc[] = {
"AES SRAM ECC error",
};
+static const char * const smca_umc2_mce_desc[] = {
+ "DRAM ECC error",
+ "Data poison error",
+ "SDP parity error",
+ "Reserved",
+ "Address/Command parity error",
+ "Write data parity error",
+ "DCQ SRAM ECC error",
+ "Reserved",
+ "Read data parity error",
+ "Rdb SRAM ECC error",
+ "RdRsp SRAM ECC error",
+ "LM32 MP errors",
+};
+
static const char * const smca_pb_mce_desc[] = {
"An ECC error in the Parameter Block RAM array",
};
@@ -400,6 +415,56 @@ static const char * const smca_pcie_mce_desc[] = {
"CCIX Non-okay write response with data error",
};
+static const char * const smca_pcie2_mce_desc[] = {
+ "SDP Parity Error logging",
+};
+
+static const char * const smca_xgmipcs_mce_desc[] = {
+ "Data Loss Error",
+ "Training Error",
+ "Flow Control Acknowledge Error",
+ "Rx Fifo Underflow Error",
+ "Rx Fifo Overflow Error",
+ "CRC Error",
+ "BER Exceeded Error",
+ "Tx Vcid Data Error",
+ "Replay Buffer Parity Error",
+ "Data Parity Error",
+ "Replay Fifo Overflow Error",
+ "Replay Fifo Underflow Error",
+ "Elastic Fifo Overflow Error",
+ "Deskew Error",
+ "Flow Control CRC Error",
+ "Data Startup Limit Error",
+ "FC Init Timeout Error",
+ "Recovery Timeout Error",
+ "Ready Serial Timeout Error",
+ "Ready Serial Attempt Error",
+ "Recovery Attempt Error",
+ "Recovery Relock Attempt Error",
+ "Replay Attempt Error",
+ "Sync Header Error",
+ "Tx Replay Timeout Error",
+ "Rx Replay Timeout Error",
+ "LinkSub Tx Timeout Error",
+ "LinkSub Rx Timeout Error",
+ "Rx CMD Pocket Error",
+};
+
+static const char * const smca_xgmiphy_mce_desc[] = {
+ "RAM ECC Error",
+ "ARC instruction buffer parity error",
+ "ARC data buffer parity error",
+ "PHY APB error",
+};
+
+static const char * const smca_waflphy_mce_desc[] = {
+ "RAM ECC Error",
+ "ARC instruction buffer parity error",
+ "ARC data buffer parity error",
+ "PHY APB error",
+};
+
struct smca_mce_desc {
const char * const *descs;
unsigned int num_descs;
@@ -418,6 +483,7 @@ static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) },
[SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) },
[SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) },
+ [SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) },
[SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) },
[SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) },
[SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) },
@@ -426,6 +492,10 @@ static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
[SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
[SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
+ [SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
+ [SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
+ [SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
+ [SMCA_WAFL_PHY] = { smca_waflphy_mce_desc, ARRAY_SIZE(smca_waflphy_mce_desc) },
};
static bool f12h_mc0_mce(u16 ec, u8 xec)
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index ce0324be6c71..4e9b627edfef 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -79,8 +79,6 @@ struct scmi_protocol_events {
int scmi_notification_init(struct scmi_handle *handle);
void scmi_notification_exit(struct scmi_handle *handle);
-
-struct scmi_protocol_handle;
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
const struct scmi_protocol_handle *ph,
const struct scmi_protocol_events *ee);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index d0dee37ad522..4ceba5ef7895 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &rate, sizeof(rate));
+ if (ret)
+ return 0;
- return ret ? ret : le32_to_cpu(rate);
+ return le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index e1926483ae2f..4c3201e290e2 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -157,7 +157,7 @@ static int __init unmarshal_devices(struct properties_header *properties)
if (!entry[0].name)
goto skip_device;
- ret = device_add_properties(dev, entry); /* makes deep copy */
+ ret = device_create_managed_software_node(dev, entry, NULL);
if (ret)
dev_err(dev, "error %d assigning properties\n", ret);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index e15d484b6a5a..ea7ca74fc173 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
return 0;
- n = 0;
- len = CPER_REC_LEN - 1;
+ len = CPER_REC_LEN;
dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
if (bank && device)
n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
"DIMM location: not present. DMI handle: 0x%.4x ",
mem->mem_dev_handle);
- msg[n] = '\0';
return n;
}
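
[editor's note] The cper_dimm_err_location() change relies on snprintf() semantics: given a size of CPER_REC_LEN, the output is always NUL-terminated and truncated to at most CPER_REC_LEN - 1 characters, so the explicit "len = CPER_REC_LEN - 1" and the trailing "msg[n] = '\0'" were redundant. A small standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char msg[8];
	int n;

	/* snprintf() never writes more than sizeof(msg) bytes and always
	 * NUL-terminates, so no manual msg[n] = '\0' is needed. */
	n = snprintf(msg, sizeof(msg), "DIMM location: %s %s ", "B0", "D3");

	printf("wanted %d bytes, stored \"%s\" (%zu chars)\n",
	       n, msg, strlen(msg));
	return 0;
}
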
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 5c9625e552f4..10d4457417a4 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -12,52 +12,39 @@
#include <linux/efi.h>
#include <linux/pci.h>
-struct acpi_hid_uid {
- struct acpi_device_id hid[2];
- char uid[11]; /* UINT_MAX + null byte */
-};
-
-static int __init match_acpi_dev(struct device *dev, const void *data)
-{
- struct acpi_hid_uid hid_uid = *(const struct acpi_hid_uid *)data;
- struct acpi_device *adev = to_acpi_device(dev);
-
- if (acpi_match_device_ids(adev, hid_uid.hid))
- return 0;
-
- if (adev->pnp.unique_id)
- return !strcmp(adev->pnp.unique_id, hid_uid.uid);
- else
- return !strcmp("0", hid_uid.uid);
-}
-
static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
- struct acpi_hid_uid hid_uid = {};
+ char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */
+ struct acpi_device *adev;
struct device *phys_dev;
if (node->header.length != 12)
return -EINVAL;
- sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+ sprintf(hid, "%c%c%c%04X",
'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
node->acpi.hid >> 16);
- sprintf(hid_uid.uid, "%u", node->acpi.uid);
-
- *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
- match_acpi_dev);
- if (!*child)
+ sprintf(uid, "%u", node->acpi.uid);
+
+ for_each_acpi_dev_match(adev, hid, NULL, -1) {
+ if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid))
+ break;
+ if (!adev->pnp.unique_id && node->acpi.uid == 0)
+ break;
+ acpi_dev_put(adev);
+ }
+ if (!adev)
return -ENODEV;
- phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+ phys_dev = acpi_get_first_physical_node(adev);
if (phys_dev) {
- get_device(phys_dev);
- put_device(*child);
- *child = phys_dev;
- }
+ *child = get_device(phys_dev);
+ acpi_dev_put(adev);
+ } else
+ *child = &adev->dev;
return 0;
}
diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
index bb042ab7c2be..e901f8564ca0 100644
--- a/drivers/firmware/efi/fdtparams.c
+++ b/drivers/firmware/efi/fdtparams.c
@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+ if (!fdt)
+ return 0;
+
for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
node = fdt_path_offset(fdt, dt_params[i].path);
if (node < 0)
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index 4e81c6077188..dd95f330fe6e 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
return 0;
/* Skip any leading slashes */
- while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
i++;
while (--result_len > 0 && i < cmdline_len) {
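
[editor's note] The libstub fix above simply bounds the "skip leading slashes" loop by cmdline_len so that a command line consisting only of separators cannot be read past its end. A minimal standalone sketch of the bounded scan, using narrow chars instead of efi_char16_t:

#include <stddef.h>
#include <stdio.h>

static size_t skip_leading_slashes(const char *s, size_t len)
{
	size_t i = 0;

	/* bound the scan by len so a buffer containing only separators
	 * cannot be walked off the end */
	while (i < len && (s[i] == '/' || s[i] == '\\'))
		i++;

	return i;
}

int main(void)
{
	const char cmdline[] = { '/', '/', '\\' };	/* not NUL-terminated */

	printf("%zu\n", skip_leading_slashes(cmdline, sizeof(cmdline)));
	return 0;
}
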
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 5737cb0fcd44..0a9aba5f9cef 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
return false;
}
- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
- return false;
- }
-
if (PAGE_SIZE > EFI_PAGE_SIZE &&
(!PAGE_ALIGNED(in->phys_addr) ||
!PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1dd0ec6727fd..3c69b785cb79 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1383,6 +1383,7 @@ config GPIO_TPS68470
config GPIO_TQMX86
tristate "TQ-Systems QTMX86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
help
This driver supports GPIO on the TQMX86 IO controller.
@@ -1450,6 +1451,7 @@ menu "PCI GPIO expanders"
config GPIO_AMD8111
tristate "AMD 8111 GPIO driver"
depends on X86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
help
The AMD 8111 south bridge contains 32 GPIO pins which can be used.
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index a4d3239d2594..4ab3fcd9b9ba 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
{ .compatible = "cdns,gpio-r1p02" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cdns_of_ids);
static struct platform_driver cdns_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 157106e1e438..b9fdf05d7669 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -334,7 +334,7 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_set_irq_type;
ct->chip.irq_set_wake = gpio_set_wake_irq;
- ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
ct->regs.ack = GPIO_ISR;
ct->regs.mask = GPIO_IMR;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 1bd9e44df718..05974b760796 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static int tegra186_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest,
- bool force)
-{
- if (data->parent_data)
- return irq_chip_set_affinity_parent(data, dest, force);
-
- return -EINVAL;
-}
-
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
gpio->intc.irq_set_wake = tegra186_irq_set_wake;
- gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 1cbce5990855..97e6caedf1f3 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
-#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_PIN_MASK(p) BIT(p)
#define WCD_REG_DIR_CTL_OFFSET 0x42
#define WCD_REG_VAL_CTL_OFFSET 0x43
#define WCD934X_NPINS 5
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index b411d3156e0b..136557e7dd3c 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -542,7 +542,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
}
/**
- * xgpio_of_probe - Probe method for the GPIO device.
+ * xgpio_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
* Return:
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 1631727bf0da..c7b5446d01fd 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1880,6 +1880,7 @@ static void gpio_v2_line_info_changed_to_v1(
struct gpio_v2_line_info_changed *lic_v2,
struct gpioline_info_changed *lic_v1)
{
+ memset(lic_v1, 0, sizeof(*lic_v1));
gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
lic_v1->timestamp = lic_v2->timestamp_ns;
lic_v1->event_type = lic_v2->event_type;
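
[editor's note] The added memset() zeroes the v1 structure before the v2 event is converted into it; since the structure is subsequently handed to userspace, any padding or fields the conversion does not write would otherwise carry uninitialized stack contents (that is the presumed rationale; the hunk itself only shows the zeroing). A generic standalone sketch of the pattern:

#include <stdint.h>
#include <string.h>

struct line_info_v1 {			/* hypothetical ABI struct */
	uint64_t timestamp;
	uint32_t event_type;
	uint32_t reserved[5];
};

static void fill_v1(struct line_info_v1 *out, uint64_t ts, uint32_t type)
{
	/* zero everything first: fields and padding not explicitly written
	 * below would otherwise hold whatever was on the stack, and the
	 * struct is copied out to userspace verbatim */
	memset(out, 0, sizeof(*out));
	out->timestamp = ts;
	out->event_type = type;
}

int main(void)
{
	struct line_info_v1 info;

	fill_v1(&info, 123456789ULL, 1);
	return (int)info.event_type - 1;
}
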
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dc3a69296321..264176a01e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1006,6 +1006,7 @@ struct amdgpu_device {
struct amdgpu_df df;
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
+ uint32_t harvest_ip_mask;
int num_ip_blocks;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index fad3b91f74f5..d39cff4a1fe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 1:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 2:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
- mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 3:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
- mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
}
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0350205c4897..6819fe5612d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
- unsigned long ras_counter;
if (!fpriv)
return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
- /*query ue count*/
- ras_counter = amdgpu_ras_query_error_count(adev, false);
- /*ras counter is monotonic increasing*/
- if (ras_counter != ctx->ras_counter_ue) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
- ctx->ras_counter_ue = ras_counter;
- }
-
- /*query ce count*/
- ras_counter = amdgpu_ras_query_error_count(adev, true);
- if (ras_counter != ctx->ras_counter_ce) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
- ctx->ras_counter_ce = ras_counter;
- }
-
mutex_unlock(&mgr->lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7d3b54615147..57ec108b5972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1683,6 +1683,19 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
if (!ip_block_version)
return -EINVAL;
+ switch (ip_block_version->type) {
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+ return 0;
+ break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
ip_block_version->funcs->name);
@@ -3105,13 +3118,14 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+ if (amdgpu_sriov_vf(adev) ||
+ adev->enable_virtual_display ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
-
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
@@ -3276,6 +3290,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
+ adev->harvest_ip_mask = 0x0;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -4466,7 +4481,6 @@ out:
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(tmp_adev);
need_full_reset = true;
r = -EAGAIN;
goto end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b2dbcb4df020..e1b6f5891759 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -373,6 +373,34 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
return -EINVAL;
}
+void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ int i;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
+
+ for (i = 0; i < 32; i++) {
+ if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 8f6183801cb3..1b1ae21b1037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -29,6 +29,7 @@
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
+void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8a1fb8b6606e..2a4cd7d377bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1047,17 +1047,18 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
goto err;
- ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
return 0;
err:
- drm_err(dev, "Failed to init gem fb: %d\n", ret);
+ drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
@@ -1071,9 +1072,6 @@ int amdgpu_display_gem_fb_verify_and_init(
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto err;
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1092,9 +1090,13 @@ int amdgpu_display_gem_fb_verify_and_init(
if (ret)
goto err;
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
return 0;
err:
- drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+ drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index baa980a477d9..37ec59365080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -214,9 +214,21 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int r;
/* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ return r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r) {
+ amdgpu_bo_unpin(bo);
+ return r;
+ }
+ }
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 4f10c4529840..09b048647523 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -288,10 +288,13 @@ out:
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+ int i;
drm_fb_helper_unregister_fbi(&rfbdev->helper);
if (rfb->base.obj[0]) {
+ for (i = 0; i < rfb->base.format->num_planes; i++)
+ drm_gem_object_put(rfb->base.obj[0]);
amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
rfb->base.obj[0] = NULL;
drm_framebuffer_unregister_private(&rfb->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 8f4a8f8d8146..39b6c6bfab45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
unsigned char buff[34];
- int addrptr = 0, size = 0;
+ int addrptr, size;
+ int len;
if (!is_fru_eeprom_supported(adev))
return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
- return 0;
+ return -ENODEV;
}
/* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
- return size;
+ return -EINVAL;
}
/* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Product name should only be 32 characters. Any more,
* and something could be wrong. Cap it at 32 to be safe
*/
- if (size > 32) {
+ if (len >= sizeof(adev->product_name)) {
DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
- size = 32;
+ len = sizeof(adev->product_name) - 1;
}
/* Start at 2 due to buff using fields 0 and 1 for the address */
- memcpy(adev->product_name, &buff[2], size);
- adev->product_name[size] = '\0';
+ memcpy(adev->product_name, &buff[2], len);
+ adev->product_name[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Product number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
- if (size > 16) {
+ if (len >= sizeof(adev->product_number)) {
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
- size = 16;
+ len = sizeof(adev->product_number) - 1;
}
- memcpy(adev->product_number, &buff[2], size);
- adev->product_number[size] = '\0';
+ memcpy(adev->product_number, &buff[2], len);
+ adev->product_number[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product version, ret:%d", size);
- return size;
+ return -EINVAL;
}
addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (size < 1) {
DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Serial number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
- if (size > 16) {
+ if (len >= sizeof(adev->serial)) {
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
- size = 16;
+ len = sizeof(adev->serial) - 1;
}
- memcpy(adev->serial, &buff[2], size);
- adev->serial[size] = '\0';
+ memcpy(adev->serial, &buff[2], len);
+ adev->serial[len] = '\0';
return 0;
}
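
[editor's note] The FRU parser now clamps each copied field against the size of its destination buffer (len >= sizeof(dst) drops to sizeof(dst) - 1) instead of a hard-coded constant, and NUL-terminates after the copy. A standalone sketch of that bounded-copy pattern:

#include <stdio.h>
#include <string.h>

/* Copy at most dst_size - 1 bytes and always NUL-terminate, clamping
 * oversized input instead of overflowing the destination. */
static size_t copy_field(char *dst, size_t dst_size,
			 const char *src, size_t src_len)
{
	size_t len = src_len;

	if (len >= dst_size)
		len = dst_size - 1;

	memcpy(dst, src, len);
	dst[len] = '\0';
	return len;
}

int main(void)
{
	char product_name[8];

	copy_field(product_name, sizeof(product_name),
		   "A-very-long-product-string", 26);
	printf("%s\n", product_name);	/* prints only the first 7 characters */
	return 0;
}
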
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1345f7eba011..f9434bc2f9b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,7 +100,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
kfree(ubo->metadata);
}
- kfree(bo);
+ kvfree(bo);
}
/**
@@ -552,7 +552,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
*bo_ptr = NULL;
- bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+ bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 46a5328e00e0..60aa99a39a74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -76,6 +76,7 @@ struct psp_ring
uint64_t ring_mem_mc_addr;
void *ring_mem_handle;
uint32_t ring_size;
+ uint32_t ring_wptr;
};
/* More registers may will be supported */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3bef0432cac2..d5cbc51c5eaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -225,7 +225,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
*addr += mm_cur->start & ~PAGE_MASK;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
+ num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
@@ -1210,6 +1210,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
+ ttm->sg = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 2408ed4c7d84..0597aeb5f0e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -173,6 +173,9 @@
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1
+
#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28)
#define GFX_RLCG_GC_WRITE (0x0 << 28)
#define GFX_RLCG_GC_READ (0x1 << 28)
@@ -1395,9 +1398,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -1415,12 +1419,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
@@ -1478,8 +1483,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
scratch_reg3 = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
- spare_int = adev->rmmio +
- (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+ + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+ } else {
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+ }
grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -7347,9 +7359,15 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ } else {
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ }
return 0;
}
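
The gfx_v10_0 hunk above makes gfx_v10_rlcg_rw() pick the Sienna Cichlid RLC_SPARE_INT_0 offset once asic_type reaches CHIP_SIENNA_CICHLID, and keep the legacy mmRLC_SPARE_INT offset otherwise. A minimal userspace sketch of that per-generation offset dispatch; the enum values and offsets below are illustrative stand-ins, not the real register map:

#include <stdint.h>
#include <stdio.h>

/* Illustrative chip enum; ordering mirrors how asic_type comparisons work. */
enum chip_type { CHIP_NAVI10 = 0, CHIP_NAVI14, CHIP_SIENNA_CICHLID, CHIP_DIMGREY_CAVEFISH };

/* Hypothetical offsets standing in for mmRLC_SPARE_INT / mmRLC_SPARE_INT_0. */
#define SPARE_INT_LEGACY_OFFSET  0x4ca0u
#define SPARE_INT_SIENNA_OFFSET  0x4ca5u

static uint32_t spare_int_offset(enum chip_type asic_type)
{
	/* Newer ASICs moved the spare interrupt register; pick per generation. */
	if (asic_type >= CHIP_SIENNA_CICHLID)
		return SPARE_INT_SIENNA_OFFSET;
	return SPARE_INT_LEGACY_OFFSET;
}

int main(void)
{
	printf("navi10:          0x%x\n", spare_int_offset(CHIP_NAVI10));
	printf("sienna_cichlid:  0x%x\n", spare_int_offset(CHIP_SIENNA_CICHLID));
	return 0;
}
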
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a078a38c2cee..516467e962b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4943,7 +4943,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -4955,8 +4955,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ else
+ data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
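
The gfx_v9_0 hunk above enters the 3D clock-gating block whenever enable is set, but only programs the CGCG enable bit when AMD_CG_SUPPORT_GFX_3D_CGCG is present in cg_flags (the soc15.c change later in this diff drops that flag for some ASICs). A compact sketch of that flag-gated register-value assembly; the bit positions and flag values here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define CG_SUPPORT_3D_CGCG   (1u << 0)   /* illustrative flags */
#define CG_SUPPORT_3D_CGLS   (1u << 1)
#define IDLE_THRESHOLD_SHIFT 0           /* illustrative field layout */
#define CGCG_EN              (1u << 29)
#define CGLS_EN              (1u << 30)

static uint32_t build_cgcg_ctrl(uint32_t cg_flags)
{
	uint32_t data;

	/* Program the idle threshold, but set the enable bit only when the
	 * feature is advertised in cg_flags -- mirroring the hunk above. */
	if (cg_flags & CG_SUPPORT_3D_CGCG)
		data = (0x36u << IDLE_THRESHOLD_SHIFT) | CGCG_EN;
	else
		data = 0x0u << IDLE_THRESHOLD_SHIFT;

	if (cg_flags & CG_SUPPORT_3D_CGLS)
		data |= CGLS_EN;

	return data;
}

int main(void)
{
	printf("cgcg+cgls: 0x%08x\n", build_cgcg_ctrl(CG_SUPPORT_3D_CGCG | CG_SUPPORT_3D_CGLS));
	printf("neither:   0x%08x\n", build_cgcg_ctrl(0));
	return 0;
}
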
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index de5abceced0d..85967a5570cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 83531997aeba..46096ad7f0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -187,19 +187,17 @@ static int jpeg_v2_5_hw_init(void *handle)
static int jpeg_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
- ring = &adev->jpeg.inst[i].ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
- ring->sched.ready = false;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index de5dfcfb3859..bd77794315bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -159,15 +159,13 @@ static int jpeg_v3_0_hw_init(void *handle)
static int jpeg_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- ring = &adev->jpeg.inst->ring_dec;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
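
The three JPEG hw_fini hunks above all add cancel_delayed_work_sync(&adev->vcn.idle_work) so the deferred idle handler cannot race with power gating, and drop the now-unneeded ring->sched.ready bookkeeping. A rough userspace analogue of "quiesce asynchronous work before teardown", using pthreads rather than the kernel workqueue API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy device with one piece of deferred "idle" work. */
struct toy_dev {
	pthread_t idle_worker;
	bool powered;
};

static void *idle_work(void *arg)
{
	struct toy_dev *dev = arg;
	/* Pretend to touch hardware; only safe while dev->powered is true. */
	if (dev->powered)
		puts("idle work: polled hardware");
	return NULL;
}

static void hw_fini(struct toy_dev *dev)
{
	/* Analogue of cancel_delayed_work_sync(): make sure no deferred work
	 * can still run before the hardware is powered down. */
	pthread_join(dev->idle_worker, NULL);
	dev->powered = false;
	puts("hw_fini: power gated");
}

int main(void)
{
	struct toy_dev dev = { .powered = true };

	pthread_create(&dev.idle_worker, NULL, idle_work, &dev);
	hw_fini(&dev);
	return 0;
}
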
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index d54af7f8801b..d290ca0b06da 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -623,6 +623,16 @@ static const struct amdgpu_ip_block_version nv_common_ip_block =
.funcs = &nv_common_ip_funcs,
};
+static bool nv_is_headless_sku(struct pci_dev *pdev)
+{
+ if ((pdev->device == 0x731E &&
+ (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+ (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
+ (pdev->device == 0x7360 && pdev->revision == 0xC7))
+ return true;
+ return false;
+}
+
static int nv_reg_base_init(struct amdgpu_device *adev)
{
int r;
@@ -635,6 +645,12 @@ static int nv_reg_base_init(struct amdgpu_device *adev)
goto legacy_init;
}
+ amdgpu_discovery_harvest_ip(adev);
+ if (nv_is_headless_sku(adev->pdev)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
+
return 0;
}
@@ -671,16 +687,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
-static bool nv_is_headless_sku(struct pci_dev *pdev)
-{
- if ((pdev->device == 0x731E &&
- (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
- (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
- (pdev->device == 0x7360 && pdev->revision == 0xC7))
- return true;
- return false;
-}
-
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -728,8 +734,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (!nv_is_headless_sku(adev->pdev))
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
@@ -752,8 +757,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (!nv_is_headless_sku(adev->pdev))
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
@@ -777,7 +781,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
-
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
@@ -1149,6 +1152,11 @@ static int nv_common_early_init(void *handle)
return -EINVAL;
}
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+ adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG);
+
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
xgpu_nv_mailbox_set_irq_funcs(adev);
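
The nv.c change moves the headless-SKU check out of the IP-block list and into nv_reg_base_init(), recording the result in harvest_ip_mask so later code (including nv_common_early_init(), which clears the VCN/JPEG power-gating flags) keys off a single mask instead of repeating PCI ID checks. A compressed sketch of that "detect once, mask everywhere" idea, reusing the device/revision pairs from the hunk but with simplified types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HARVEST_IP_VCN_MASK  0x1u
#define HARVEST_IP_JPEG_MASK 0x2u

struct toy_pci_id { uint16_t device; uint8_t revision; };

/* Same ID/revision pairs as nv_is_headless_sku() in the hunk above. */
static bool is_headless_sku(const struct toy_pci_id *id)
{
	return (id->device == 0x731E && (id->revision == 0xC6 || id->revision == 0xC7)) ||
	       (id->device == 0x7340 && id->revision == 0xC9) ||
	       (id->device == 0x7360 && id->revision == 0xC7);
}

int main(void)
{
	struct toy_pci_id id = { .device = 0x7340, .revision = 0xC9 };
	uint32_t harvest_mask = 0;

	if (is_headless_sku(&id))
		harvest_mask |= HARVEST_IP_VCN_MASK | HARVEST_IP_JPEG_MASK;

	/* Later stages consult the mask instead of re-checking PCI IDs. */
	if (!(harvest_mask & HARVEST_IP_VCN_MASK))
		puts("adding VCN block");
	else
		puts("VCN harvested on this SKU, skipping");
	return 0;
}
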
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 589410c32d09..02bba1f3c42e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
- data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
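
Both PSP hunks (psp_v11_0 and psp_v3_1 below) switch the SR-IOV path from reading the write pointer back out of MP0_SMN_C2PMSG_102 to returning a value cached at set time in psp->km_ring.ring_wptr. The shadow-register idea, reduced to a toy ring whose names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Toy ring that shadows its write pointer in software.  Under SR-IOV the
 * hardware copy may be disturbed by another function, so reads come from
 * the cached value while writes still go to "hardware". */
struct toy_ring {
	uint32_t hw_wptr;   /* stands in for the C2PMSG_102 register */
	uint32_t ring_wptr; /* software shadow, as in psp->km_ring.ring_wptr */
};

static void ring_set_wptr(struct toy_ring *ring, uint32_t value)
{
	ring->hw_wptr = value;   /* register write */
	ring->ring_wptr = value; /* keep the shadow in sync */
}

static uint32_t ring_get_wptr(const struct toy_ring *ring)
{
	return ring->ring_wptr;  /* trust the shadow, not the register */
}

int main(void)
{
	struct toy_ring ring = { 0 };

	ring_set_wptr(&ring, 16);
	ring.hw_wptr = 0; /* simulate the register being reset behind our back */
	printf("wptr = %u\n", ring_get_wptr(&ring)); /* still 16 */
	return 0;
}
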
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f2e725f72d2f..908664a5774b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
- data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
+ psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 920fc6d4a127..8859133ce37e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index b1ad9e52b234..240596b25fe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -497,11 +497,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
- sdma2->sched.ready = false;
- sdma3->sched.ready = false;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d80e12b80c7e..e65c286f93a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -302,6 +302,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
*codecs = &rv_video_codecs_decode;
return 0;
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
case CHIP_RENOIR:
if (encode)
*codecs = &vega_video_codecs_encode;
@@ -1392,7 +1393,6 @@ static int soc15_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
- AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1401,7 +1401,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
@@ -1411,7 +1412,6 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
- AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2bab9c77952f..cf3803f8f075 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
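
The uvd_v6_0 fix adds the missing amdgpu_bo_unpin() to the error path so the buffer's pin count stays balanced with the reserve/unreserve and unref calls around it. The general goto-cleanup shape, shown with a toy pin counter:

#include <stdio.h>

/* Toy buffer object with pin/reserve counters to show balanced cleanup. */
struct toy_bo { int pins; int reserved; };

static int run_test(struct toy_bo *bo, int fail)
{
	int r = 0;

	bo->reserved++;
	bo->pins++;

	if (fail) {        /* submission failed somewhere */
		r = -5;
		goto error;
	}

error:
	/* Every acquire above has a matching release here, regardless of r. */
	bo->pins--;        /* the analogue of the added amdgpu_bo_unpin() */
	bo->reserved--;
	return r;
}

int main(void)
{
	struct toy_bo bo = { 0 };

	run_test(&bo, 1);
	printf("pins=%d reserved=%d (both should be 0)\n", bo.pins, bo.reserved);
	return 0;
}
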
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 51a773a37a35..27b1ced145d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
return 0;
}
@@ -1119,10 +1123,10 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
- /* put VCPU into reset */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ /* stall UMC channel */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
@@ -1141,6 +1145,11 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+ /* put VCPU into reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
vcn_v1_0_enable_clock_gating(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 116b9643d5ba..8af567c546db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 948813d7caa0..888b17d84691 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index cf165ab5dd26..3b23de996db2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -372,15 +372,14 @@ done:
static int vcn_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- int i, j;
+ int i;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- ring = &adev->vcn.inst[i].ring_dec;
-
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
@@ -388,12 +387,6 @@ static int vcn_v3_0_hw_fini(void *handle)
vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
- ring->sched.ready = false;
-
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- ring->sched.ready = false;
- }
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 389eff96fcf6..652cc1a0e450 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
}
- adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
DRM_ERROR("Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_display_mode saved_mode;
struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
- bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ bool recalculate_timing = false;
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing |= amdgpu_freesync_vid_mode &&
+ recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
mode = *freesync_mode;
} else {
decide_crtc_timing_for_drm_display_mode(
- &mode, preferred_mode,
- dm_state ? (dm_state->scaling != RMX_OFF) : false);
- }
+ &mode, preferred_mode, scale);
- preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
}
if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
- if (!recalculate_timing || mode_refresh != preferred_refresh)
+ if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
if (cursor_scale_w != primary_scale_w ||
cursor_scale_h != primary_scale_h) {
- DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+ drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
return -EINVAL;
}
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
int i;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane_state *primary_state, *overlay_state = NULL;
+ struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
if (!primary_state->crtc)
return 0;
+ /* check if cursor plane is enabled */
+ cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+ if (IS_ERR(cursor_state))
+ return PTR_ERR(cursor_state);
+
+ if (drm_atomic_plane_disabling(plane->state, cursor_state))
+ return 0;
+
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 616f5b1ea3a8..666796a0067c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -650,6 +650,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
hdcp_work[0].attr = data_attr;
+ sysfs_bin_attr_init(&hdcp_work[0].attr);
if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
DRM_WARN("Failed to create device file hdcp_srm");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f4374d83662a..c1f5474c205a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1076,6 +1076,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
dc_is_dvi_signal(link->connector_signal)) {
if (prev_sink)
dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+
+ return false;
+ }
+ /*
+ * Abort detection for DP connectors if we have no EDID and the
+ * connector is an active converter, as there is no display downstream.
+ */
+ if (dc_is_dp_sst_signal(link->connector_signal) &&
+ (link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 527e56c353cb..8357aa3c41d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- if (voltage_supported && dummy_pstate_supported) {
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
goto restore_dml_state;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 4a5fa23d8e7b..5fcc2e64305d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
.fp16 = 16000
},
+ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 600,
- .nv12 = 600,
- .fp16 = 600
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
}
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5b54b7fc5105..472696f949ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
.fp16 = 16000
},
+ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 600,
- .nv12 = 600,
- .fp16 = 600
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
},
64,
64
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index fc2dea243d1b..a33f0365329b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -284,10 +284,11 @@ static const struct dc_plane_cap plane_cap = {
.nv12 = 16000,
.fp16 = 16000
},
+ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 600,
- .nv12 = 600,
- .fp16 = 600
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
},
16,
16
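
The three plane_cap hunks (dcn30, dcn301, dcn302) change max_downscale_factor from 600 to 167. Per the comment added in the hunks, the factor is a per-mille fraction of the source size, so the maximum downscale ratio is 1000 divided by the factor: 600 allows roughly 1.67:1, while 167 allows roughly 6:1. A two-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int old_factor = 600, new_factor = 167;

	/* factor is destination size in 1/1000 units of the source size */
	printf("factor %d -> max downscale %.2f:1\n", old_factor, 1000.0 / old_factor);
	printf("factor %d -> max downscale %.2f:1\n", new_factor, 1000.0 / new_factor);
	return 0;
}
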
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 43ed6291b2b8..9ab706cd07ff 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -216,6 +216,12 @@ enum PP_FEATURE_MASK {
PP_GFX_DCS_MASK = 0x80000,
};
+enum amd_harvest_ip_mask {
+ AMD_HARVEST_IP_VCN_MASK = 0x1,
+ AMD_HARVEST_IP_JPEG_MASK = 0x2,
+ AMD_HARVEST_IP_DMU_MASK = 0x4,
+};
+
enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index f5fe540cd536..27cf22716793 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -810,6 +810,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
data->fine_grain_enabled = 1;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index 26a5321e621b..15c0b8af376f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -4817,70 +4817,70 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
u32 reg;
int ret;
- table->initialState.levels[0].mclk.vDLL_CNTL =
+ table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(si_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_SS =
+ table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
+ table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->initialState.levels[0].mclk.mclk_value =
+ table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
- table->initialState.levels[0].sclk.sclk_value =
+ table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
- table->initialState.levels[0].arbRefreshState =
+ table->initialState.level.arbRefreshState =
SISLANDS_INITIAL_STATE_ARB_INDEX;
- table->initialState.levels[0].ACIndex = 0;
+ table->initialState.level.ACIndex = 0;
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
- &table->initialState.levels[0].vddc,
+ &table->initialState.level.vddc,
&std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
+ table->initialState.level.vddc.index,
+ &table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
si_populate_voltage_value(adev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
+ &table->initialState.level.vddci);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
@@ -4888,41 +4888,41 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
initial_state->performance_levels[0].vddc,
initial_state->performance_levels[0].sclk,
initial_state->performance_levels[0].mclk,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
- si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+ si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
- table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+ table->initialState.level.aT = cpu_to_be32(reg);
+ table->initialState.level.bSP = cpu_to_be32(pi->dsp);
+ table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- table->initialState.levels[0].strobeMode =
+ table->initialState.level.strobeMode =
si_get_strobe_mode_settings(adev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+ table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
else
- table->initialState.levels[0].mcFlags = 0;
+ table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
- table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->initialState.level.dpm2.MaxPS = 0;
+ table->initialState.level.dpm2.NearTDPDec = 0;
+ table->initialState.level.dpm2.AboveSafeInc = 0;
+ table->initialState.level.dpm2.BelowSafeInc = 0;
+ table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -4953,18 +4953,18 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
if (pi->acpi_vddc) {
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+ table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
if (si_pi->vddc_phase_shed_control) {
si_populate_phase_shedding_value(adev,
@@ -4972,23 +4972,23 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
pi->acpi_vddc,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
} else {
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+ pi->min_vddc_in_table, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE =
+ table->ACPIState.level.gen2PCIE =
(u8)amdgpu_get_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
@@ -5000,14 +5000,14 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
pi->min_vddc_in_table,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
if (pi->acpi_vddc) {
if (eg_pi->acpi_vddci)
si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
+ &table->ACPIState.level.vddci);
}
mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
@@ -5018,59 +5018,59 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
- table->ACPIState.levels[0].mclk.vDLL_CNTL =
+ table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(mpll_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(mpll_func_cntl_1);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(mpll_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_SS =
+ table->ACPIState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+ table->ACPIState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(spll_func_cntl_4);
- table->ACPIState.levels[0].mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.sclk_value = 0;
+ table->ACPIState.level.mclk.mclk_value = 0;
+ table->ACPIState.level.sclk.sclk_value = 0;
- si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+ si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 0;
+ table->ACPIState.level.ACIndex = 0;
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
- table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->ACPIState.level.dpm2.MaxPS = 0;
+ table->ACPIState.level.dpm2.NearTDPDec = 0;
+ table->ACPIState.level.dpm2.AboveSafeInc = 0;
+ table->ACPIState.level.dpm2.BelowSafeInc = 0;
+ table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
static int si_populate_ulv_state(struct amdgpu_device *adev,
- SISLANDS_SMC_SWSTATE *state)
+ struct SISLANDS_SMC_SWSTATE_SINGLE *state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
@@ -5079,19 +5079,19 @@ static int si_populate_ulv_state(struct amdgpu_device *adev,
int ret;
ret = si_convert_power_level_to_smc(adev, &ulv->pl,
- &state->levels[0]);
+ &state->level);
if (!ret) {
if (eg_pi->sclk_deep_sleep) {
if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
else
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
}
if (ulv->one_pcie_lane_in_ulv)
state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
- state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
- state->levels[0].ACIndex = 1;
- state->levels[0].std_vddc = state->levels[0].vddc;
+ state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+ state->level.ACIndex = 1;
+ state->level.std_vddc = state->level.vddc;
state->levelCount = 1;
state->flags |= PPSMC_SWSTATE_FLAG_DC;
@@ -5190,7 +5190,9 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- table->driverState = table->initialState;
+ table->driverState.flags = table->initialState.flags;
+ table->driverState.levelCount = table->initialState.levelCount;
+ table->driverState.levels[0] = table->initialState.level;
ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
SISLANDS_INITIAL_STATE_ARB_INDEX);
@@ -5737,8 +5739,8 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
if (ulv->supported && ulv->pl.vddc) {
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, ULVState);
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+ struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
+ u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
memset(smc_state, 0, state_size);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
index 0f7554052c90..c7dc117a688c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
@@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE
typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+struct SISLANDS_SMC_SWSTATE_SINGLE {
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL level;
+};
+
#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
struct SISLANDS_SMC_STATETABLE
{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
- SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
- PP_SIslands_DPM2Parameters dpm2Params;
- SISLANDS_SMC_SWSTATE initialState;
- SISLANDS_SMC_SWSTATE ACPIState;
- SISLANDS_SMC_SWSTATE ULVState;
- SISLANDS_SMC_SWSTATE driverState;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+ SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
+ PP_SIslands_DPM2Parameters dpm2Params;
+ struct SISLANDS_SMC_SWSTATE_SINGLE initialState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ULVState;
+ SISLANDS_SMC_SWSTATE driverState;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
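
Taken together, the si_dpm.c and sislands_smc.h hunks move the single-level users of SISLANDS_SMC_SWSTATE (the initial, ACPI and ULV states) onto a dedicated SISLANDS_SMC_SWSTATE_SINGLE, so the structure copied to the SMC has a fixed size with exactly one embedded level, while driverState keeps the multi-level form. Loosely mirroring that split, a toy illustration of why a single-level struct is easier to size and memset than a header followed by a level array; the field layout below is made up:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-ins for the SMC structures; fields are illustrative only. */
struct perf_level { uint32_t sclk, mclk; uint8_t flags, pad[3]; };

struct sw_state {                 /* header + variable number of levels */
	uint8_t flags, level_count, pad[2];
	struct perf_level levels[];   /* flexible array member */
};

struct sw_state_single {          /* exactly one level, fixed size */
	uint8_t flags, level_count, pad[2];
	struct perf_level level;
};

int main(void)
{
	printf("header only : %zu bytes\n", sizeof(struct sw_state));
	printf("single level: %zu bytes\n", sizeof(struct sw_state_single));
	printf("per level   : %zu bytes\n", sizeof(struct perf_level));
	return 0;
}
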
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..0eaf86b5e698 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2925,6 +2925,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
struct amdgpu_device *adev = smu->adev;
uint32_t param = 0;
@@ -2932,6 +2934,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
if (adev->asic_type == CHIP_NAVI12)
return 0;
+ /*
+ * Skip the MGpuFanBoost setting for those ASICs
+ * which do not support it
+ */
+ if (!smc_pptable->MGpuFanBoostLimitRpm)
+ return 0;
+
/* Workaround for WS SKU */
if (adev->pdev->device == 0x7312 &&
adev->pdev->revision == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d2fd44b903ca..b124a5e40dd6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3027,6 +3027,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+
+ /*
+ * Skip the MGpuFanBoost setting for those ASICs
+ * which do not support it
+ */
+ if (!smc_pptable->MGpuFanBoostLimitRpm)
+ return 0;
+
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMGpuFanBoostLimitRpm,
0,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 05ad75d155e8..cfe4fc69277e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -232,7 +232,6 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
pm_runtime_put_sync(dev->dev);
- drm_crtc_vblank_on(c);
}
#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
@@ -344,7 +343,16 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(c);
+}
+
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
+ struct drm_atomic_state *state)
+{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->dev->event_lock, flags);
if (c->state->event) {
c->state->event->pipe = drm_crtc_index(c);
@@ -354,12 +362,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
crtc->event = c->state->event;
c->state->event = NULL;
}
-}
-
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- /* TODO: write common plane control register if available */
+ spin_unlock_irqrestore(&c->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 65af56e47129..f09b6dd8754c 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -593,6 +593,7 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = dc->desc->max_width;
dev->mode_config.max_height = dc->desc->max_height;
dev->mode_config.funcs = &mode_config_funcs;
+ dev->mode_config.async_page_flip = true;
return 0;
}
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index f2d46b7ac6f9..232abbba3686 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
void drm_master_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d273d1a8603a..495a4767a443 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
+ struct drm_master *master;
- mutex_lock(&master->dev->master_mutex);
+ mutex_lock(&dev->master_mutex);
+ master = file_priv->master;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len)) {
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return -EFAULT;
}
}
u->unique_len = master->unique_len;
- mutex_unlock(&master->dev->master_mutex);
+ mutex_unlock(&dev->master_mutex);
return 0;
}
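
The drm_auth.c and drm_ioctl.c hunks above share one idea: file_priv->master is only stable while dev->master_mutex is held, so the pointer is now loaded after the lock is taken instead of being cached beforehand. The pattern in miniature, with simplified types and a pthread mutex standing in for master_mutex:

#include <pthread.h>
#include <stdio.h>

struct toy_master { int unique_len; };
struct toy_file   { struct toy_master *master; };

static pthread_mutex_t master_mutex = PTHREAD_MUTEX_INITIALIZER;

static int get_unique_len(struct toy_file *f)
{
	struct toy_master *master;
	int len;

	pthread_mutex_lock(&master_mutex);
	master = f->master;        /* read the pointer under the lock */
	len = master->unique_len;
	pthread_mutex_unlock(&master_mutex);
	return len;
}

int main(void)
{
	struct toy_master m = { .unique_len = 12 };
	struct toy_file f = { .master = &m };

	printf("unique_len = %d\n", get_unique_len(&f));
	return 0;
}
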
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b9a4b7670a89..197b97341cad 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -815,10 +815,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(ctx->addr)) {
- dev_err(dev, "ioremap failed\n");
+ if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
- }
ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
if (ret < 0)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 44e402b7cdfb..2d2fe5ab26e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1786,10 +1786,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(dsi->reg_base)) {
- dev_err(dev, "failed to remap io region\n");
+ if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base);
- }
dsi->phy = devm_phy_get(dev, "dsim");
if (IS_ERR(dsi->phy)) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 49a2e0c53918..ae576122873e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -723,7 +723,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
}
/**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: local driver data
* @win: window to protect registers for
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 69f57ca9c68d..1e1cb245fca7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,7 +20,6 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
- select IO_MAPPING
select SYNC_FILE
select IOSF_MBI
select CRC32
@@ -102,7 +101,6 @@ config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
- depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
default n
help
Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 6a2dee8cef1f..642c60f3d9b1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1095,44 +1095,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- const struct link_config_limits *limits)
-{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
-
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
-
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
-
- for (lane_count = limits->min_lane_count;
- lane_count <= limits->max_lane_count;
- lane_count <<= 1) {
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
-
- return 0;
- }
- }
- }
- }
-
- return -EINVAL;
-}
-
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -1382,22 +1344,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
- if (intel_dp_is_edp(intel_dp))
- /*
- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
- * section A.1: "It is recommended that the minimum number of
- * lanes be used, using the minimum link rate allowed for that
- * lane configuration."
- *
- * Note that we fall back to the max clock and lane count for eDP
- * panels that fail with the fast optimal settings (see
- * intel_dp->use_max_params), in which case the fast vs. wide
- * choice doesn't matter.
- */
- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
- else
- /* Optimize for slow and wide. */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ /*
+ * Optimize for slow and wide for everything, because some eDP 1.3 and
+ * 1.4 panels don't work well with fast and narrow.
+ */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2160,7 +2111,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
* -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
* -sink is HDMI2.1
*/
- if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) ||
+ if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
intel_dp->frl.is_trained)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 02a003fd48fb..50cae0198a3d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
-/**
- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
- * @intel_dp: Intel DP struct
- *
- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
- * link training mode if any is detected and read the PHY capabilities for all
- * detected LTTPRs. In case of an LTTPR detection error or if the number of
- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
- * transparent mode link training mode.
- *
- * Returns:
- * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
- * DPRX capabilities are read out.
- * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- * detection failure and the transparent LT mode was set. The DPRX
- * capabilities are read out.
- * <0 Reading out the DPRX capabilities failed.
- */
-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
int lttpr_count;
- bool ret;
int i;
- ret = intel_dp_read_lttpr_common_caps(intel_dp);
-
- /* The DPTX shall read the DPRX caps after LTTPR detection. */
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
- return -EIO;
- }
-
- if (!ret)
- return 0;
-
- /*
- * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
- * at least 1.4.
- */
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
- intel_dp_reset_lttpr_common_caps(intel_dp);
+ if (!intel_dp_read_lttpr_common_caps(intel_dp))
return 0;
- }
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
@@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
return lttpr_count;
}
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent mode link training mode.
+ *
+ * Returns:
+ * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ * DPRX capabilities are read out.
+ * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ * detection failure and the transparent LT mode was set. The DPRX
+ * capabilities are read out.
+ * <0 Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+ int lttpr_count = intel_dp_init_lttpr(intel_dp);
+
+ /* The DPTX shall read the DPRX caps after LTTPR detection. */
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return -EIO;
+ }
+
+ return lttpr_count;
+}
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e5dadde422f7..bbaf05515e88 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
i830_overlay_clock_gating(dev_priv, true);
}
-static void
+__i915_active_call static void
intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 23f6b00e08e2..8598a1c78a4c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
- chunk = roundup(chunk, tile_row_pages(obj));
+ chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
@@ -367,10 +367,11 @@ retry:
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
- (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start));
+ ret = remap_io_mapping(area,
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start),
+ &ggtt->iomap);
if (ret)
goto err_fence;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index aed8a37ccdc9..7361971c177d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+ atomic_inc(&obj->mm.shrink_pin);
shrinkable = false;
}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index de575fdb033f..21f08e53889c 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
- batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, 0xffff0000 |
+ ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+ HIZ_RAW_STALL_OPT_DISABLE :
+ 0));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
gen7_emit_pipeline_invalidate(&cmds);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 176c19633412..74bf6fc8461f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -641,7 +641,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
err = pin_pt_dma(vm, pde->pt.base);
if (err) {
- i915_gem_object_put(pde->pt.base);
free_pd(vm, pde);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index e72b7a0dc316..8a322594210c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
- if (intel_uncore_read(uncore, C0DRB3) ==
- intel_uncore_read(uncore, C1DRB3)) {
+ if (intel_uncore_read16(uncore, C0DRB3) ==
+ intel_uncore_read16(uncore, C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e7c2babcee8b..cbac409f6c8a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-static struct intel_vgpu_type *
-intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
-{
- if (WARN_ON(type_group_id >= gvt->num_types))
- return NULL;
- return &gvt->types[type_group_id];
-}
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
-{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- return 0;
-
- return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n"
- "weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
-{
- *intel_vgpu_type_groups = gvt_vgpu_type_groups;
- return true;
-}
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (WARN_ON(!group))
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
-}
-
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
- .get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
if (ret)
goto out_clean_thread;
- ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret) {
- gvt_err("failed to init vgpu type groups: %d\n", ret);
- goto out_clean_types;
- }
-
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+ void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+ intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 88ab360fcb31..0c0615602343 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -574,9 +574,6 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
- struct intel_vgpu_type *(*gvt_find_vgpu_type)(
- struct intel_gvt *gvt, unsigned int type_group_id);
- bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b79da5124f83..f33e3cbd0439 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -49,7 +49,7 @@ enum hypervisor_type {
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev);
+ void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 65ff43cfc0f7..48b4d4cf805d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
return !!(handle & ~0xff);
}
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ struct intel_vgpu_type *type;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!group)
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = gvt_type_attrs;
+ gvt_vgpu_type_groups[i] = group;
+ }
+
+ return 0;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = gvt_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = gvt_vgpu_type_groups[i];
+ gvt_vgpu_type_groups[i] = NULL;
+ kfree(group);
+ }
+}
+
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
- void *gvt;
+ struct intel_gvt *gvt;
int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
- mdev_get_type_group_id(mdev));
+ type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type) {
ret = -EINVAL;
goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute_group **kvm_vgpu_type_groups;
+ int ret;
+
+ ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
+ if (ret)
+ return ret;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
- return -EFAULT;
- intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
+ intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
- return mdev_register_device(dev, &intel_vgpu_ops);
+ ret = mdev_register_device(dev, &intel_vgpu_ops);
+ if (ret)
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+
+ return ret;
}
-static void kvmgt_host_exit(struct device *dev)
+static void kvmgt_host_exit(struct device *dev, void *gvt)
{
mdev_unregister_device(dev);
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 550a456e936f..e6c5a792a49a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
- intel_gvt_host.mpt->host_exit(dev);
+ intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cf9a3d384971..aa573b078ae7 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
return 0;
}
-static void auto_retire(struct i915_active *ref)
+__i915_active_call static void
+auto_retire(struct i915_active *ref)
{
i915_active_put(ref);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9ec9277539ec..69e43bf91a15 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b23f58e94cfb..b3cedd20f365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -999,12 +999,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
if (i915_gem_object_has_pages(obj)) {
- struct list_head *list;
+ unsigned long flags;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!list_empty(&obj->mm.link)) {
+ struct list_head *list;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
@@ -1012,8 +1011,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
/* if the object is no longer attached, discard its backing storage */
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 4c8cd08c672d..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,10 +28,90 @@
#include "i915_drv.h"
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+ struct mm_struct *mm;
+ unsigned long pfn;
+ pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
+};
+
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+ r->pfn++;
+
+ return 0;
+}
#define use_dma(io) ((io) != -1)
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.sgp))
+ return -EINVAL;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap)
+{
+ struct remap_pfn r;
+ int err;
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ r.mm = vma->vm_mm;
+ r.pfn = pfn;
+ r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+ (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+ err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
+
/**
* remap_io_sg - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -46,7 +126,12 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
- unsigned long pfn, len, remapped = 0;
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +140,11 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
- do {
- if (use_dma(iobase)) {
- if (!sg_dma_len(sgl))
- break;
- pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
- len = sg_dma_len(sgl);
- } else {
- pfn = page_to_pfn(sg_page(sgl));
- len = sgl->length;
- }
-
- err = remap_pfn_range(vma, addr + remapped, pfn, len,
- vma->vm_page_prot);
- if (err)
- break;
- remapped += len;
- } while ((sgl = __sg_next(sgl)));
-
- if (err)
- zap_vma_ptes(vma, addr, remapped);
- return err;
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index ee8e753d98ce..eae0abd614cb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
for (n = 0; n < smoke[0].ncontexts; n++) {
smoke[0].contexts[n] = live_context(i915, file);
- if (!smoke[0].contexts[n]) {
- ret = -ENOMEM;
+ if (IS_ERR(smoke[0].contexts[n])) {
+ ret = PTR_ERR(smoke[0].contexts[n]);
goto out_contexts;
}
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index f64e06e1067d..96ea1a2c11dd 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -137,6 +137,7 @@ static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
/* Allocate LCD interrupt resources */
irq_lcd = platform_get_irq(pdev, 0);
if (irq_lcd < 0) {
+ ret = irq_lcd;
drm_err(&kmb->drm, "irq_lcd not found");
goto setup_fail;
}
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index b3fd3501c412..5275b2723293 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
* porches and sync.
*/
/* (ps/s) / (pixels/s) = ps/pixels */
- pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+ pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
pclk);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 453d8b4c5763..07fcd12dca16 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev,
static void meson_drv_shutdown(struct platform_device *pdev)
{
struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
- struct drm_device *drm = priv->drm;
- DRM_DEBUG_DRIVER("\n");
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
+ if (!priv)
+ return;
+
+ drm_kms_helper_poll_fini(priv->drm);
+ drm_atomic_helper_shutdown(priv->drm);
}
static int meson_drv_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index d553f62f4eeb..f6c1b62b901e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
* GPU registers so we need to add 0x1a800 to the register value on A630
* to get the right value from PM4.
*/
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_start));
/* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, submit->seqno);
trace_msm_gpu_submit_flush(submit,
- gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
- REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
a6xx_flush(gpu, ring);
}
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs = a6xx_protect;
+ unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+ if (adreno_is_a650(adreno_gpu)) {
+ regs = a650_protect;
+ count = ARRAY_SIZE(a650_protect);
+ count_max = 48;
+ }
+
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+ for (i = 0; i < count - 1; i++)
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+ /* last CP_PROTECT to have "infinite" length on the last entry */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
- uavflagprd_inv >> 4 | lower_bit << 1);
+ uavflagprd_inv << 4 | lower_bit << 1);
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
}
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
}
/* Protect registers from the CP */
- gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
- gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
- A6XX_PROTECT_RDONLY(0x600, 0x51));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
- A6XX_PROTECT_RDONLY(0xfc00, 0x3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
- A6XX_PROTECT_RDONLY(0x0, 0x4f9));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
- A6XX_PROTECT_RDONLY(0x501, 0xa));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
- A6XX_PROTECT_RDONLY(0x511, 0x44));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
- A6XX_PROTECT_RW(0xbe20, 0x11f3));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ a6xx_set_cp_protect(gpu);
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
@@ -1153,10 +1226,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
{
struct device_node *phandle;
- a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
- if (IS_ERR(a6xx_gpu->llc_mmio))
- return;
-
/*
* There is a different programming path for targets with an mmu500
* attached, so detect if that is the case
@@ -1166,6 +1235,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);
+ if (a6xx_gpu->have_mmu500)
+ a6xx_gpu->llc_mmio = NULL;
+ else
+ a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@@ -1210,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
- if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ if (a6xx_gpu->shadow_bo)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index ce0610c5256f..bb544dfe5737 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -44,7 +44,7 @@ struct a6xx_gpu {
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
* registers starting at _reg.
*/
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
((1 << 31) | \
(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 82a8673ab8da..d7e4a39a904e 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
+ dp_display_signal_audio_start(dp_display);
dp_display->audio_enabled = true;
end:
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 5a39da6e1eaf..1784e119269b 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
return 0;
}
+void dp_display_signal_audio_start(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ reinit_completion(&dp->audio_comp);
+}
+
void dp_display_signal_audio_complete(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_CONNECT_PENDING) {
- dp_display_enable(dp, 0);
+ if (state == ST_CONNECT_PENDING)
dp->hpd_state = ST_CONNECTED;
- }
mutex_unlock(&dp->event_mutex);
@@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
- reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(g_dp_display, false);
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
@@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_DISCONNECT_PENDING) {
- dp_display_disable(dp, 0);
+ if (state == ST_DISCONNECT_PENDING)
dp->hpd_state = ST_DISCONNECTED;
- }
mutex_unlock(&dp->event_mutex);
@@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
/* signal the disconnect event */
- reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
@@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
status = dp_catalog_link_is_connected(dp->catalog);
- if (status)
+ /*
+ * The display cannot be declared connected unless the
+ * HDMI cable is plugged in and the dongle's sink_count
+ * becomes 1.
+ */
+ if (status && dp->link->sink_count)
dp->dp_display.is_connected = true;
else
dp->dp_display.is_connected = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 6092ba1ed85e..5173c89eedf7 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f0a2ddf96a4b..ff7f2ec42030 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -843,7 +843,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
if (pixel_clk_provider)
*pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
- return -EINVAL;
+ return 0;
}
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 34bc93548fcf..657778889d35 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_10nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 582b1428f971..86e40a0d41a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -405,6 +405,10 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
if (!vco_name)
return -ENOMEM;
+ parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!parent_name)
+ return -ENOMEM;
+
clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
if (!clk_name)
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index e76ce40a12ab..6f96fbac8282 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
pll_freq += div_u64(tmp64, multiplier);
vco_rate = pll_freq;
+ pll_7nm->vco_current_rate = vco_rate;
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e1104d2454e2..fe7d17cd35ec 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -42,7 +42,7 @@
* - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 6
+#define MSM_VERSION_MINOR 7
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b199942266a2..369d91e6361e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -190,13 +190,25 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
}
p = get_pages(obj);
+
+ if (!IS_ERR(p)) {
+ msm_obj->pin_count++;
+ update_inactive(msm_obj);
+ }
+
msm_gem_unlock(obj);
return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
- /* when we start tracking the pin count, then do something here */
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_lock(obj);
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_inactive(msm_obj);
+ msm_gem_unlock(obj);
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
@@ -646,6 +658,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
ret = -ENOMEM;
goto fail;
}
+
+ update_inactive(msm_obj);
}
return msm_obj->vaddr;
@@ -1227,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
+ /* Call chain get_pages() -> update_inactive() tries to
+ * access msm_obj->mm_list, but it is not initialized yet.
+ * To avoid a NULL pointer dereference, initialize
+ * mm_list to be empty.
+ */
+ INIT_LIST_HEAD(&msm_obj->mm_list);
+
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index a6480d2c81b2..03e2cc2a2ce1 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -221,7 +221,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
- return msm_obj->base.dma_buf && msm_obj->base.import_attach;
+ return msm_obj->base.import_attach || msm_obj->pin_count;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -271,7 +271,7 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
- return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
+ return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}
static inline void mark_evictable(struct msm_gem_object *msm_obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3e09df0472ce..170aba99a110 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -546,7 +546,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
@@ -582,7 +582,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 347488685f74..60019d0532fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,7 +93,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- return 0;
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+ if (ret)
+ goto error;
+
+ if (nvbo->bo.moving)
+ ret = dma_fence_wait(nvbo->bo.moving, true);
+
+ ttm_bo_unreserve(&nvbo->bo);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ nouveau_bo_unpin(nvbo);
+ return ret;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index f484147fc3a6..c4b388850a13 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -383,6 +383,7 @@ MODULE_DEVICE_TABLE(spi, ld9040_ids);
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
+ .id_table = ld9040_ids,
.driver = {
.name = "panel-samsung-ld9040",
.of_match_table = ld9040_of_match,
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index dd5ef6493723..769f666335ac 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1687,102 +1687,102 @@ static int ni_populate_smc_initial_state(struct radeon_device *rdev,
u32 reg;
int ret;
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vDLL_CNTL =
+ table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(ni_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMPLL_SS =
+ table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
+ table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
- table->initialState.levels[0].mclk.mclk_value =
+ table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
- table->initialState.levels[0].sclk.sclk_value =
+ table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
- table->initialState.levels[0].arbRefreshState =
+ table->initialState.level.arbRefreshState =
NISLANDS_INITIAL_STATE_ARB_INDEX;
- table->initialState.levels[0].ACIndex = 0;
+ table->initialState.level.ACIndex = 0;
ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
- &table->initialState.levels[0].vddc,
+ &table->initialState.level.vddc,
&std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
+ table->initialState.level.vddc.index,
+ &table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
ni_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
+ &table->initialState.level.vddci);
- ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+ ni_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
+ table->initialState.level.aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+ table->initialState.level.bSP = cpu_to_be32(pi->dsp);
if (pi->boot_in_gen2)
- table->initialState.levels[0].gen2PCIE = 1;
+ table->initialState.level.gen2PCIE = 1;
else
- table->initialState.levels[0].gen2PCIE = 0;
+ table->initialState.level.gen2PCIE = 0;
if (pi->mem_gddr5) {
- table->initialState.levels[0].strobeMode =
+ table->initialState.level.strobeMode =
cypress_get_strobe_mode_settings(rdev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
+ table->initialState.level.mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
else
- table->initialState.levels[0].mcFlags = 0;
+ table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+ table->initialState.level.dpm2.MaxPS = 0;
+ table->initialState.level.dpm2.NearTDPDec = 0;
+ table->initialState.level.dpm2.AboveSafeInc = 0;
+ table->initialState.level.dpm2.BelowSafeInc = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -1813,43 +1813,43 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
if (pi->acpi_vddc) {
ret = ni_populate_voltage_value(rdev,
&eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
if (pi->pcie_gen2) {
if (pi->acpi_pcie_gen2)
- table->ACPIState.levels[0].gen2PCIE = 1;
+ table->ACPIState.level.gen2PCIE = 1;
else
- table->ACPIState.levels[0].gen2PCIE = 0;
+ table->ACPIState.level.gen2PCIE = 0;
} else {
- table->ACPIState.levels[0].gen2PCIE = 0;
+ table->ACPIState.level.gen2PCIE = 0;
}
} else {
ret = ni_populate_voltage_value(rdev,
&eg_pi->vddc_voltage_table,
pi->min_vddc_in_table,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc,
+ &table->ACPIState.level.vddc,
&std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = 0;
+ table->ACPIState.level.gen2PCIE = 0;
}
if (eg_pi->acpi_vddci) {
@@ -1857,7 +1857,7 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
ni_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
+ &table->ACPIState.level.vddci);
}
@@ -1900,37 +1900,37 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+ table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+ table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.mclk_value = 0;
+ table->ACPIState.level.mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
- table->ACPIState.levels[0].sclk.sclk_value = 0;
+ table->ACPIState.level.sclk.sclk_value = 0;
- ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+ ni_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 1;
+ table->ACPIState.level.ACIndex = 1;
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+ table->ACPIState.level.dpm2.MaxPS = 0;
+ table->ACPIState.level.dpm2.NearTDPDec = 0;
+ table->ACPIState.level.dpm2.AboveSafeInc = 0;
+ table->ACPIState.level.dpm2.BelowSafeInc = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -1980,7 +1980,9 @@ static int ni_init_smc_table(struct radeon_device *rdev)
if (ret)
return ret;
- table->driverState = table->initialState;
+ table->driverState.flags = table->initialState.flags;
+ table->driverState.levelCount = table->initialState.levelCount;
+ table->driverState.levels[0] = table->initialState.level;
table->ULVState = table->initialState;
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
index 7395cb6b3cac..42f3bab0f9ee 100644
--- a/drivers/gpu/drm/radeon/nislands_smc.h
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -143,6 +143,14 @@ struct NISLANDS_SMC_SWSTATE
typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+struct NISLANDS_SMC_SWSTATE_SINGLE {
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL level;
+};
+
#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -160,19 +168,19 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
struct NISLANDS_SMC_STATETABLE
{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ PP_NIslands_DPM2Parameters dpm2Params;
+ struct NISLANDS_SMC_SWSTATE_SINGLE initialState;
+ struct NISLANDS_SMC_SWSTATE_SINGLE ACPIState;
+ struct NISLANDS_SMC_SWSTATE_SINGLE ULVState;
+ NISLANDS_SMC_SWSTATE driverState;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 42281fce552e..56ed5634cebe 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1549,6 +1549,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
+ int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 3808a753127b..04109a2a6fd7 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages[p] = pagelist[i];
+ rdev->gart.pages[p] = pagelist ? pagelist[i] :
+ rdev->dummy_page.page;
page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
page_entry = radeon_gart_get_page_entry(page_base, flags);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0c1950f4e146..3861c0b98fcf 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1767,6 +1767,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
+ struct radeon_connector *radeon_connector;
if (!rdev->pm.dpm_enabled)
return;
@@ -1776,6 +1777,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
+ rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -1783,6 +1785,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
+ if (!radeon_crtc->connector)
+ continue;
+
+ radeon_connector = to_radeon_connector(radeon_crtc->connector);
+ if (radeon_connector->pixelclock_for_modeset > 297000)
+ rdev->pm.dpm.high_pixelclock_count++;
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 42a87948e28c..4a90807351e7 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,9 +77,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
+ if (unlikely(ret))
+ goto error;
+
+ if (bo->tbo.moving) {
+ ret = dma_fence_wait(bo->tbo.moving, false);
+ if (unlikely(ret)) {
+ radeon_bo_unpin(bo);
+ goto error;
+ }
+ }
+
+ bo->prime_shared_count++;
+error:
radeon_bo_unreserve(bo);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..06bb24d7a9fe 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
size = radeon_bo_size(rdev->uvd.vcpu_bo);
size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size;
- memset(ptr, 0, size);
+ memset_io((void __iomem *)ptr, 0, size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 918609551804..3add39c1a689 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
+
+ if (rdev->pm.dpm.high_pixelclock_count > 1)
+ disable_sclk_switching = true;
}
if (rps->vce_active) {
@@ -4350,70 +4353,70 @@ static int si_populate_smc_initial_state(struct radeon_device *rdev,
u32 reg;
int ret;
- table->initialState.levels[0].mclk.vDLL_CNTL =
+ table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(si_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_SS =
+ table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
+ table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->initialState.levels[0].mclk.mclk_value =
+ table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
- table->initialState.levels[0].sclk.sclk_value =
+ table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
- table->initialState.levels[0].arbRefreshState =
+ table->initialState.level.arbRefreshState =
SISLANDS_INITIAL_STATE_ARB_INDEX;
- table->initialState.levels[0].ACIndex = 0;
+ table->initialState.level.ACIndex = 0;
ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(rdev,
- &table->initialState.levels[0].vddc,
+ &table->initialState.level.vddc,
&std_vddc);
if (!ret)
si_populate_std_voltage_value(rdev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
+ table->initialState.level.vddc.index,
+ &table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
si_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
+ &table->initialState.level.vddci);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(rdev,
@@ -4421,43 +4424,43 @@ static int si_populate_smc_initial_state(struct radeon_device *rdev,
initial_state->performance_levels[0].vddc,
initial_state->performance_levels[0].sclk,
initial_state->performance_levels[0].mclk,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
- si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+ si_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
+ table->initialState.level.aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+ table->initialState.level.bSP = cpu_to_be32(pi->dsp);
- table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+ table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
if (pi->mem_gddr5) {
- table->initialState.levels[0].strobeMode =
+ table->initialState.level.strobeMode =
si_get_strobe_mode_settings(rdev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+ table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
else
- table->initialState.levels[0].mcFlags = 0;
+ table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
- table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->initialState.level.dpm2.MaxPS = 0;
+ table->initialState.level.dpm2.NearTDPDec = 0;
+ table->initialState.level.dpm2.AboveSafeInc = 0;
+ table->initialState.level.dpm2.BelowSafeInc = 0;
+ table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -4488,18 +4491,18 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
if (pi->acpi_vddc) {
ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+ table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
if (si_pi->vddc_phase_shed_control) {
si_populate_phase_shedding_value(rdev,
@@ -4507,23 +4510,23 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
pi->acpi_vddc,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
} else {
ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
- pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+ pi->min_vddc_in_table, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
+ table->ACPIState.level.gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
RADEON_PCIE_GEN1);
@@ -4534,14 +4537,14 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
pi->min_vddc_in_table,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
if (pi->acpi_vddc) {
if (eg_pi->acpi_vddci)
si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
+ &table->ACPIState.level.vddci);
}
mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
@@ -4552,59 +4555,59 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
- table->ACPIState.levels[0].mclk.vDLL_CNTL =
+ table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(mpll_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(mpll_func_cntl_1);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(mpll_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_SS =
+ table->ACPIState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+ table->ACPIState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(spll_func_cntl_4);
- table->ACPIState.levels[0].mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.sclk_value = 0;
+ table->ACPIState.level.mclk.mclk_value = 0;
+ table->ACPIState.level.sclk.sclk_value = 0;
- si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+ si_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 0;
+ table->ACPIState.level.ACIndex = 0;
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
- table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->ACPIState.level.dpm2.MaxPS = 0;
+ table->ACPIState.level.dpm2.NearTDPDec = 0;
+ table->ACPIState.level.dpm2.AboveSafeInc = 0;
+ table->ACPIState.level.dpm2.BelowSafeInc = 0;
+ table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
static int si_populate_ulv_state(struct radeon_device *rdev,
- SISLANDS_SMC_SWSTATE *state)
+ struct SISLANDS_SMC_SWSTATE_SINGLE *state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct si_power_info *si_pi = si_get_pi(rdev);
@@ -4613,19 +4616,19 @@ static int si_populate_ulv_state(struct radeon_device *rdev,
int ret;
ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
- &state->levels[0]);
+ &state->level);
if (!ret) {
if (eg_pi->sclk_deep_sleep) {
if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
else
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
}
if (ulv->one_pcie_lane_in_ulv)
state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
- state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
- state->levels[0].ACIndex = 1;
- state->levels[0].std_vddc = state->levels[0].vddc;
+ state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+ state->level.ACIndex = 1;
+ state->level.std_vddc = state->level.vddc;
state->levelCount = 1;
state->flags |= PPSMC_SWSTATE_FLAG_DC;
@@ -4725,7 +4728,9 @@ static int si_init_smc_table(struct radeon_device *rdev)
if (ret)
return ret;
- table->driverState = table->initialState;
+ table->driverState.flags = table->initialState.flags;
+ table->driverState.levelCount = table->initialState.levelCount;
+ table->driverState.levels[0] = table->initialState.level;
ret = si_do_program_memory_timing_parameters(rdev, radeon_boot_state,
SISLANDS_INITIAL_STATE_ARB_INDEX);
@@ -5275,8 +5280,8 @@ static int si_upload_ulv_state(struct radeon_device *rdev)
if (ulv->supported && ulv->pl.vddc) {
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, ULVState);
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+ struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
+ u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
memset(smc_state, 0, state_size);
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index fbd6589bdab9..4ea1cb2e45a3 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE
typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+struct SISLANDS_SMC_SWSTATE_SINGLE {
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL level;
+};
+
#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
struct SISLANDS_SMC_STATETABLE
{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
- SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
- PP_SIslands_DPM2Parameters dpm2Params;
- SISLANDS_SMC_SWSTATE initialState;
- SISLANDS_SMC_SWSTATE ACPIState;
- SISLANDS_SMC_SWSTATE ULVState;
- SISLANDS_SMC_SWSTATE driverState;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+ SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
+ PP_SIslands_DPM2Parameters dpm2Params;
+ struct SISLANDS_SMC_SWSTATE_SINGLE initialState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ULVState;
+ SISLANDS_SMC_SWSTATE driverState;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index bbdfd5e26ec8..f75fb157f2ff 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_clk_tmds;
}
- ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+ ret = sun8i_hdmi_phy_get(hdmi, phy_node);
of_node_put(phy_node);
if (ret) {
dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
cleanup_encoder:
drm_encoder_cleanup(encoder);
- sun8i_hdmi_phy_remove(hdmi);
err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
- sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.of_match_table = sun8i_dw_hdmi_dt_ids,
},
};
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+ if (ret) {
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+ platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d4b55af0592f..74f6ed0e2570 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
struct gpio_desc *ddc_en;
};
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
static inline struct sun8i_dw_hdmi *
encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct sun8i_dw_hdmi, encoder);
}
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 9994edf67509..c9239708d398 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include "sun8i_dw_hdmi.h"
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{ /* sentinel */ }
};
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct sun8i_hdmi_phy *phy;
+
+ if (!pdev)
+ return -EPROBE_DEFER;
+
+ phy = platform_get_drvdata(pdev);
+ if (!phy)
+ return -EPROBE_DEFER;
+
+ hdmi->phy = phy;
+
+ put_device(&pdev->dev);
+
+ return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- struct device *dev = hdmi->dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct sun8i_hdmi_phy *phy;
struct resource res;
void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
clk_prepare_enable(phy->clk_phy);
}
- hdmi->phy = phy;
+ platform_set_drvdata(pdev, phy);
return 0;
@@ -728,9 +749,9 @@ err_put_clk_bus:
return ret;
}
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
{
- struct sun8i_hdmi_phy *phy = hdmi->phy;
+ struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
clk_disable_unprepare(phy->clk_mod);
clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
clk_put(phy->clk_pll1);
clk_put(phy->clk_mod);
clk_put(phy->clk_bus);
+ return 0;
}
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+ .probe = sun8i_hdmi_phy_probe,
+ .remove = sun8i_hdmi_phy_remove,
+ .driver = {
+ .name = "sun8i-hdmi-phy",
+ .of_match_table = sun8i_hdmi_phy_of_table,
+ },
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 87df251c1fcf..0cb868065348 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -25,7 +25,7 @@
#include "trace.h"
/* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
struct reset_control;
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 79bff8b48271..bfae8a02f55b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
* dGPU sector layout.
*/
if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
- base |= BIT(39);
+ base |= BIT_ULL(39);
#endif
tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7b88261f57bb..0ea320c1092b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
err);
- return err;
+ goto rpm_put;
}
}
err = clk_prepare_enable(sor->clk);
if (err < 0) {
dev_err(sor->dev, "failed to enable clock: %d\n", err);
- return err;
+ goto rpm_put;
}
usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
clk_disable_unprepare(sor->clk);
- return err;
+ goto rpm_put;
}
reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
}
return 0;
+
+rpm_put:
+ if (sor->rst)
+ pm_runtime_put(sor->dev);
+
+ return err;
}
static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
- if (get_device(&sor->aux->ddc.dev)) {
- if (try_module_get(sor->aux->ddc.owner))
- sor->output.ddc = &sor->aux->ddc;
- else
- put_device(&sor->aux->ddc.dev);
- }
+ if (get_device(sor->aux->dev))
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
err = tegra_sor_parse_dt(sor);
if (err < 0)
- return err;
+ goto put_aux;
err = tegra_output_probe(&sor->output);
- if (err < 0)
- return dev_err_probe(&pdev->dev, err,
- "failed to probe output\n");
+ if (err < 0) {
+ dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+ goto put_aux;
+ }
if (sor->ops && sor->ops->probe) {
err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- INIT_LIST_HEAD(&sor->client.list);
+ host1x_client_init(&sor->client);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
- err = host1x_client_register(&sor->client);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register host1x client: %d\n",
- err);
- goto rpm_disable;
- }
-
/*
* On Tegra210 and earlier, provide our own implementation for the
* pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
sor->index);
if (!name) {
err = -ENOMEM;
- goto unregister;
+ goto uninit;
}
err = host1x_client_resume(&sor->client);
if (err < 0) {
dev_err(sor->dev, "failed to resume: %d\n", err);
- goto unregister;
+ goto uninit;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
err = PTR_ERR(sor->clk_pad);
dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
err);
- goto unregister;
+ goto uninit;
+ }
+
+ err = __host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto uninit;
}
return 0;
-unregister:
- host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+ host1x_client_exit(&sor->client);
pm_runtime_disable(&pdev->dev);
remove:
+ if (sor->aux)
+ sor->output.ddc = NULL;
+
tegra_output_remove(&sor->output);
+put_aux:
+ if (sor->aux)
+ put_device(sor->aux->dev);
+
return err;
}
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
+ if (sor->aux) {
+ put_device(sor->aux->dev);
+ sor->output.ddc = NULL;
+ }
+
tegra_output_remove(&sor->output);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cfd0b9292397..ebcffe794adb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1172,7 +1172,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
return -EBUSY;
- if (!ttm_bo_get_unless_zero(bo)) {
+ if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+ bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ !ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
return -EBUSY;
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 510e3e001dab..3d9c62b93e29 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
list_for_each_entry(bo, &man->lru[j], lru) {
- uint32_t num_pages;
+ uint32_t num_pages = PFN_UP(bo->base.size);
- if (!bo->ttm ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
- continue;
-
- num_pages = bo->ttm->num_pages;
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1fda574579af..8106b5634fe1 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -159,6 +159,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
if (vc4_hdmi->hpd_gpio) {
if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
vc4_hdmi->hpd_active_low)
@@ -180,10 +182,12 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@@ -473,7 +477,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -784,13 +787,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
- }
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
/*
@@ -801,7 +797,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
(hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -809,7 +804,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@@ -1929,6 +1923,29 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
+#ifdef CONFIG_PM
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+ return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2165,11 +2182,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+ vc4_hdmi_runtime_resume,
+ NULL)
+};
+
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
+ .pm = &vc4_hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index bb5529a7a9c2..948b3a58aad1 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+ ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
}
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index bd5b8eb58b18..090529d0d5dc 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -197,12 +197,6 @@ struct vc4_vec_connector {
struct drm_encoder *encoder;
};
-static inline struct vc4_vec_connector *
-to_vc4_vec_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct vc4_vec_connector, base);
-}
-
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 46f69c532b6b..218e3718fd68 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -736,6 +736,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
EXPORT_SYMBOL(host1x_driver_unregister);
/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+ INIT_LIST_HEAD(&client->list);
+ __mutex_init(&client->lock, "host1x client lock", key);
+ client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+ mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
+/**
* __host1x_client_register() - register a host1x client
* @client: host1x client
* @key: lock class key for the client-specific mutex
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
* device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation.
*/
-int __host1x_client_register(struct host1x_client *client,
- struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
{
struct host1x *host1x;
int err;
- INIT_LIST_HEAD(&client->list);
- __mutex_init(&client->lock, "host1x client lock", key);
- client->usecount = 0;
-
mutex_lock(&devices_lock);
list_for_each_entry(host1x, &devices, list) {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4bf263c2d61a..160554903ef9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -93,11 +93,11 @@ menu "Special HID drivers"
depends on HID
config HID_A4TECH
- tristate "A4 tech mice"
+ tristate "A4TECH mice"
depends on HID
default !EXPERT
help
- Support for A4 tech X5 and WOP-35 / Trust 450L mice.
+ Support for some A4TECH mice with two scroll wheels.
config HID_ACCUTOUCH
tristate "Accutouch touch device"
@@ -922,6 +922,21 @@ config HID_SAMSUNG
help
Support for Samsung InfraRed remote control or keyboards.
+config HID_SEMITEK
+ tristate "Semitek USB keyboards"
+ depends on HID
+ help
+ Support for Semitek USB keyboards that are not fully compliant
+ with the HID standard.
+
+ There are many variants, including:
+ - GK61, GK64, GK68, GK84, GK96, etc.
+ - SK61, SK64, SK68, SK84, SK96, etc.
+ - Dierya DK61/DK66
+ - Tronsmart TK09R
+ - Woo-dy
+ - X-Bows Nature/Knight
+
config HID_SONY
tristate "Sony PS2/3/4 accessories"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 193431ec4db8..1ea1a7c0b20f 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
obj-$(CONFIG_HID_RMI) += hid-rmi.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
+obj-$(CONFIG_HID_SEMITEK) += hid-semitek.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 2ab38b715347..3589d9945da1 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
sensor_index = req_node->sensor_idx;
report_id = req_node->report_id;
node_type = req_node->report_type;
+ kfree(req_node);
if (node_type == HID_FEATURE_REPORT) {
report_size = get_feature_report(sensor_index, report_id,
@@ -142,7 +143,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
int rc, i;
dev = &privdata->pdev->dev;
- cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+ cl_data = devm_kzalloc(dev, sizeof(*cl_data), GFP_KERNEL);
if (!cl_data)
return -ENOMEM;
@@ -175,12 +176,12 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
rc = -EINVAL;
goto cleanup;
}
- cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+ cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
if (!cl_data->feature_report[i]) {
rc = -ENOMEM;
goto cleanup;
}
- cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
+ cl_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
if (!cl_data->input_report[i]) {
rc = -ENOMEM;
goto cleanup;
@@ -189,7 +190,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
info.sensor_idx = cl_idx;
info.dma_address = cl_data->sensor_dma_addr[i];
- cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+ cl_data->report_descr[i] =
+ devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
if (!cl_data->report_descr[i]) {
rc = -ENOMEM;
goto cleanup;
@@ -214,11 +216,11 @@ cleanup:
cl_data->sensor_virt_addr[i],
cl_data->sensor_dma_addr[i]);
}
- kfree(cl_data->feature_report[i]);
- kfree(cl_data->input_report[i]);
- kfree(cl_data->report_descr[i]);
+ devm_kfree(dev, cl_data->feature_report[i]);
+ devm_kfree(dev, cl_data->input_report[i]);
+ devm_kfree(dev, cl_data->report_descr[i]);
}
- kfree(cl_data);
+ devm_kfree(dev, cl_data);
return rc;
}
@@ -241,6 +243,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
cl_data->sensor_dma_addr[i]);
}
}
- kfree(cl_data);
return 0;
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
index 4f989483aa03..5ad1e7acd294 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
int i;
for (i = 0; i < cli_data->num_hid_devices; ++i) {
- kfree(cli_data->feature_report[i]);
- kfree(cli_data->input_report[i]);
- kfree(cli_data->report_descr[i]);
if (cli_data->hid_sensor_hubs[i]) {
kfree(cli_data->hid_sensor_hubs[i]->driver_data);
hid_destroy_device(cli_data->hid_sensor_hubs[i]);
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 3a8c4a5971f7..2cbc32dda7f7 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
+ .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
{ }
};
MODULE_DEVICE_TABLE(hid, a4_devices);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 2ab22b925941..fca8fc78a78a 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_T100_KEYBOARD BIT(6)
#define QUIRK_T100CHI BIT(7)
#define QUIRK_G752_KEYBOARD BIT(8)
-#define QUIRK_T101HA_DOCK BIT(9)
-#define QUIRK_T90CHI BIT(10)
-#define QUIRK_MEDION_E1239T BIT(11)
-#define QUIRK_ROG_NKEY_KEYBOARD BIT(12)
+#define QUIRK_T90CHI BIT(9)
+#define QUIRK_MEDION_E1239T BIT(10)
+#define QUIRK_ROG_NKEY_KEYBOARD BIT(11)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_MEDION_E1239T)
return asus_e1239t_event(drvdata, data, size);
- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
/*
* Skip these report ID, the device emits a continuous stream associated
* with the AURA mode it is in which looks like an 'echo'.
@@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev,
return -1;
}
}
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ /*
+ * G713 and G733 send these codes on some keypresses; depending on
+ * the key pressed, they can trigger a shutdown event if not caught.
+ */
+ if (data[0] == 0x02 && data[1] == 0x30) {
+ return -1;
+ }
+ }
+
}
return 0;
@@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- /* use hid-multitouch for T101HA touchpad */
- if (id->driver_data & QUIRK_T101HA_DOCK &&
- hdev->collection->usage == HID_GD_MOUSE)
- return -ENODEV;
-
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
QUIRK_MEDION_E1239T },
+ /*
+ * Note: we bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard
+ * part, while letting hid-multitouch.c handle the touchpad.
+ */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0ae9f6df59d1..0de2788b9814 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
case BUS_I2C:
bus = "I2C";
break;
+ case BUS_VIRTUAL:
+ bus = "VIRTUAL";
+ break;
default:
bus = "<UNKNOWN>";
}
@@ -2588,7 +2591,6 @@ int hid_check_keys_pressed(struct hid_device *hid)
return 0;
}
-
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
static int __init hid_init(void)
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 59f8d716d78f..a311fb87b02a 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -930,6 +930,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_APPSELECT] = "AppSelect",
[KEY_SCREENSAVER] = "ScreenSaver",
[KEY_VOICECOMMAND] = "VoiceCommand",
+ [KEY_ASSISTANT] = "Assistant",
+ [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+ [KEY_EMOJI_PICKER] = "EmojiPicker",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index a5751607ce24..f43a8406cb9a 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report {
u8 address; /* 7-bit I2C address */
u8 flag; /* I2C transaction condition */
u8 length; /* data payload length */
- u8 data[60]; /* data payload */
+ u8 data[FT260_WR_DATA_MAX]; /* data payload */
} __packed;
struct ft260_i2c_read_request_report {
@@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev,
ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- memcpy(data, buf, len);
+ if (likely(ret == len))
+ memcpy(data, buf, len);
+ else if (ret >= 0)
+ ret = -EIO;
kfree(buf);
return ret;
}
@@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev)
ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
(u8 *)&report, sizeof(report));
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
hid_err(hdev, "failed to retrieve status: %d\n", ret);
return ret;
}
@@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
struct ft260_i2c_write_request_report *rep =
(struct ft260_i2c_write_request_report *)dev->write_buf;
+ if (data_len >= sizeof(rep->data))
+ return -EINVAL;
+
rep->address = addr;
rep->data[0] = cmd;
rep->length = data_len + 1;
@@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev,
ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
(u8 *)cfg, len);
- if (ret != len) {
+ if (ret < 0) {
hid_err(hdev, "failed to retrieve system status\n");
- if (ret >= 0)
- return -EIO;
+ return ret;
}
return 0;
}
@@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
int ret;
ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
- if (ret != len && ret >= 0)
- return -EIO;
+ if (ret < 0)
+ return ret;
return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
}
@@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
int ret;
ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
- if (ret != len && ret >= 0)
- return -EIO;
+ if (ret < 0)
+ return ret;
return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
}
@@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
(u8 *)&version, sizeof(version));
- if (ret != sizeof(version)) {
+ if (ret < 0) {
hid_err(hdev, "failed to retrieve chip version\n");
- if (ret >= 0)
- ret = -EIO;
goto err_hid_close;
}
diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c
index 898871c8c768..29ccb0accfba 100644
--- a/drivers/hid/hid-gt683r.c
+++ b/drivers/hid/hid-gt683r.c
@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
{ }
};
+MODULE_DEVICE_TABLE(hid, gt683r_led_id);
static void gt683r_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 84b8da3e7d09..b84a0a11e05b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -26,6 +26,7 @@
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a
#define USB_DEVICE_ID_A4TECH_RP_649 0x001a
+#define USB_DEVICE_ID_A4TECH_NB_95 0x022b
#define USB_VENDOR_ID_AASHIMA 0x06d6
#define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025
@@ -299,8 +300,6 @@
#define USB_VENDOR_ID_CORSAIR 0x1b1c
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
-
-#define USB_VENDOR_ID_CORSAIR 0x1b1c
#define USB_DEVICE_ID_CORSAIR_K70R 0x1b09
#define USB_DEVICE_ID_CORSAIR_K95RGB 0x1b11
#define USB_DEVICE_ID_CORSAIR_M65RGB 0x1b12
@@ -751,6 +750,7 @@
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
@@ -1051,6 +1051,7 @@
#define USB_DEVICE_ID_SAITEK_X52 0x075c
#define USB_DEVICE_ID_SAITEK_X52_2 0x0255
#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762
+#define USB_DEVICE_ID_SAITEK_X65 0x0b6a
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
@@ -1060,6 +1061,9 @@
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2 0x0027
+#define USB_VENDOR_ID_SEMITEK 0x1ea7
+#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907
+
#define USB_VENDOR_ID_SENNHEISER 0x1395
#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
@@ -1161,6 +1165,7 @@
#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A 0x6e21
#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 18f5e28d475c..abbfa91e73e4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -964,6 +964,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
+
+ case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break;
+
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
case 0x0e2: map_key_clear(KEY_MUTE); break;
case 0x0e5: map_key_clear(KEY_BASSBOOST); break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d598094dadd0..fee4e54a3ce0 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1263,6 +1263,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
int status;
long flags = (long) data[2];
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
if (flags & 0x80)
switch (flags & 0x07) {
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 2bb473d8c424..8bcaee4ccae0 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
hdev->type != HID_TYPE_USBMOUSE)
- return 0;
+ return -ENODEV;
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
@@ -779,7 +779,10 @@ err_stop_hw:
static void magicmouse_remove(struct hid_device *hdev)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- cancel_delayed_work_sync(&msc->work);
+
+ if (msc)
+ cancel_delayed_work_sync(&msc->work);
+
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 9d9f3e1bd5f4..2e4fb76c45f3 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
+#define MT_QUIRK_DISABLE_WAKEUP BIT(21)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
#define MT_CLS_EXPORT_ALL_INPUTS 0x0013
/* reserved 0x0014 */
#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
+#define MT_CLS_WIN_8_DISABLE_WAKEUP 0x0016
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
MT_QUIRK_WIN8_PTP_BUTTONS |
MT_QUIRK_FORCE_MULTI_INPUT,
.export_all_inputs = true },
+ { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_IGNORE_DUPLICATES |
+ MT_QUIRK_HOVERING |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_STICKY_FINGERS |
+ MT_QUIRK_WIN8_PTP_BUTTONS |
+ MT_QUIRK_DISABLE_WAKEUP,
+ .export_all_inputs = true },
/*
* vendor specific classes
@@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
continue;
- for (n = 0; n < field->report_count; n++) {
- if (field->usage[n].hid == HID_DG_CONTACTID)
- rdata->is_mt_collection = true;
+ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+ for (n = 0; n < field->report_count; n++) {
+ if (field->usage[n].hid == HID_DG_CONTACTID) {
+ rdata->is_mt_collection = true;
+ break;
+ }
+ }
}
}
@@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
case HID_DG_CONFIDENCE:
if ((cls->name == MT_CLS_WIN_8 ||
- cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+ cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
/* we do not set suffix = "Touchscreen" */
hi->input->name = hdev->name;
break;
- case HID_DG_STYLUS:
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- break;
case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
suffix = "Custom Media Keys";
break;
+ case HID_DG_STYLUS:
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ fallthrough;
case HID_DG_PEN:
suffix = "Stylus";
break;
@@ -1749,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
#ifdef CONFIG_PM
static int mt_suspend(struct hid_device *hdev, pm_message_t state)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
/* High latency is desirable for power savings during S3/S0ix */
- mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+ if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP)
+ mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+ else
+ mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+
return 0;
}
@@ -1809,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
USB_DEVICE_ID_ANTON_TOUCH_PAD) },
+ /* Asus T101HA */
+ { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
+
/* Asus T304UA */
{ .driver_data = MT_CLS_ASUS,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 3dd6f15f2a67..51b39bda9a9d 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
#endif
#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
new file mode 100644
index 000000000000..ba6607d5e051
--- /dev/null
+++ b/drivers/hid/hid-semitek.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for Semitek keyboards
+ *
+ * Copyright (c) 2021 Benjamin Moody
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /* In the report descriptor for interface 2, fix the incorrect
+ description of report ID 0x04 (the report contains a
+ bitmask, not an array of keycodes.) */
+ if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
+ hid_info(hdev, "fixing up Semitek report descriptor\n");
+ rdesc[0x84] = 0x02;
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id semitek_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, semitek_devices);
+
+static struct hid_driver semitek_driver = {
+ .name = "semitek",
+ .id_table = semitek_devices,
+ .report_fixup = semitek_report_fixup,
+};
+module_hid_driver(semitek_driver);
+
+MODULE_LICENSE("GPL");
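The fixup above flips the Input item's data byte at offset 0x84 from 0x00 (Data,Array) to 0x02 (Data,Variable), so report 0x04 is parsed as a key bitmask rather than a keycode array. A minimal standalone sketch of the same check, assuming the same descriptor size and offsets; semitek_patch_rdesc() is a hypothetical helper, not part of the patch:

    #include <stddef.h>
    #include <stdint.h>

    /* 0x81 is the HID short-item tag for an Input item with one data byte;
     * data 0x00 means Data,Array (keycode array), 0x02 means Data,Variable
     * (one bit per key), which is what report 0x04 actually carries. */
    static int semitek_patch_rdesc(uint8_t *rdesc, size_t rsize)
    {
            if (rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
                    rdesc[0x84] = 0x02;
                    return 1;       /* patched */
            }
            return 0;               /* descriptor left untouched */
    }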
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 2e6662173a79..32c2306e240d 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int index, field_index, usage;
char name[HID_CUSTOM_NAME_LENGTH];
- int value;
+ int value, ret;
if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
@@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
report_id = sensor_inst->fields[field_index].attribute.
report_id;
- sensor_hub_set_feature(sensor_inst->hsdev, report_id,
- index, sizeof(value), &value);
+ ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+ index, sizeof(value), &value);
+ if (ret)
+ return ret;
} else
return -EINVAL;
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 95cf88f3bafb..6abd3e2a9094 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
buffer_size = buffer_size / sizeof(__s32);
if (buffer_size) {
for (i = 0; i < buffer_size; ++i) {
- hid_set_field(report->field[field_index], i,
- (__force __s32)cpu_to_le32(*buf32));
+ ret = hid_set_field(report->field[field_index], i,
+ (__force __s32)cpu_to_le32(*buf32));
+ if (ret)
+ goto done_proc;
+
++buf32;
}
}
if (remaining_bytes) {
value = 0;
memcpy(&value, (u8 *)buf32, remaining_bytes);
- hid_set_field(report->field[field_index], i,
- (__force __s32)cpu_to_le32(value));
+ ret = hid_set_field(report->field[field_index], i,
+ (__force __s32)cpu_to_le32(value));
+ if (ret)
+ goto done_proc;
}
hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
hid_hw_wait(hsdev->hdev);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 2e452c6e8ef4..f643b1cb112d 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -312,7 +312,7 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
}
tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
- if (!tm_wheel->model_request) {
+ if (!tm_wheel->change_request) {
ret = -ENOMEM;
goto error5;
}
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 9993133989a5..46474612e73c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -45,6 +45,7 @@
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
/* flags */
@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
+ /*
+	 * Sending the wakeup after reset actually breaks ELAN touchscreen controllers
+ */
+ { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
{ 0, 0 }
};
@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
}
/* At least some SIS devices need this after reset */
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
out_unlock:
mutex_unlock(&ihid->reset_lock);
@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
hid->product = le16_to_cpu(ihid->hdesc.wProductID);
- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
- client->name, hid->vendor, hid->product);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+ client->name, (u16)hid->vendor, (u16)hid->product);
strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 21b87e4003af..07e3cbc86bef 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -28,6 +28,8 @@
#define EHL_Ax_DEVICE_ID 0x4BB3
#define TGL_LP_DEVICE_ID 0xA0FC
#define TGL_H_DEVICE_ID 0x43FC
+#define ADL_S_DEVICE_ID 0x7AF8
+#define ADL_P_DEVICE_ID 0x51FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 06081cf9b85a..a6d5173ac003 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index 7b27ec392232..5571e74abe91 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid)
shid->hid->dev.parent = shid->dev;
shid->hid->bus = BUS_HOST;
- shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
- shid->hid->product = cpu_to_le16(shid->attrs.product);
- shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+ shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+ shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+ shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
shid->hid->country = shid->hid_desc.country_code;
snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 86257ce6d619..4e9077363c96 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
dir = usbhid->ctrl[usbhid->ctrltail].dir;
- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ len = hid_report_len(report);
if (dir == USB_DIR_OUT) {
usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
usbhid->urbctrl->transfer_buffer_length = len;
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index ea126c50acc3..3b4ee21cd811 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+ error = -EPERM;
hid_notice(hid,
"device does not support device managed pool\n");
goto fail;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 87624902ea80..e3675377bc5d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1583,6 +1583,17 @@ config SENSORS_SHT3x
This driver can also be built as a module. If so, the module
will be called sht3x.
+config SENSORS_SHT4x
+	tristate "Sensirion humidity and temperature sensors. SHT4x and compat."
+ depends on I2C
+ select CRC8
+ help
+	  If you say yes here you get support for the Sensirion SHT40, SHT41 and
+ SHT45 humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called sht4x.
+
config SENSORS_SHTC1
tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 59e78bc212cf..d712c61c1f5e 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -171,6 +171,7 @@ obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o
+obj-$(CONFIG_SENSORS_SHT4x) += sht4x.o
obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMM665) += smm665.o
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 5677263bcf0d..483cd757abd3 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -485,7 +485,7 @@ static int adm9240_in_write(struct device *dev, u32 attr, int channel, long val)
reg = ADM9240_REG_IN_MIN(channel);
break;
case hwmon_in_max:
- reg = ADM9240_REG_IN(channel);
+ reg = ADM9240_REG_IN_MAX(channel);
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
index 3e1d56585b91..74ce5211eb75 100644
--- a/drivers/hwmon/bt1-pvt.c
+++ b/drivers/hwmon/bt1-pvt.c
@@ -924,10 +924,8 @@ static int pvt_request_regs(struct pvt_hwmon *pvt)
}
pvt->regs = devm_ioremap_resource(pvt->dev, res);
- if (IS_ERR(pvt->regs)) {
- dev_err(pvt->dev, "Couldn't map PVT registers\n");
+ if (IS_ERR(pvt->regs))
return PTR_ERR(pvt->regs);
- }
return 0;
}
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 3a5807e4a2ef..731d5117f9f1 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -355,7 +355,7 @@ static umode_t corsairpsu_hwmon_power_is_visible(const struct corsairpsu_data *p
return 0444;
default:
return 0;
- };
+ }
}
static umode_t corsairpsu_hwmon_in_is_visible(const struct corsairpsu_data *priv, u32 attr,
@@ -376,7 +376,7 @@ static umode_t corsairpsu_hwmon_in_is_visible(const struct corsairpsu_data *priv
break;
default:
break;
- };
+ }
return res;
}
@@ -771,6 +771,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
return 0;
}
+#ifdef CONFIG_PM
+static int corsairpsu_resume(struct hid_device *hdev)
+{
+ struct corsairpsu_data *priv = hid_get_drvdata(hdev);
+
+ /* some PSUs turn off the microcontroller during standby, so a reinit is required */
+ return corsairpsu_init(priv);
+}
+#endif
+
static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
@@ -793,6 +803,10 @@ static struct hid_driver corsairpsu_driver = {
.probe = corsairpsu_probe,
.remove = corsairpsu_remove,
.raw_event = corsairpsu_raw_event,
+#ifdef CONFIG_PM
+ .resume = corsairpsu_resume,
+ .reset_resume = corsairpsu_resume,
+#endif
};
module_hid_driver(corsairpsu_driver);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 2970892bed82..f2221ca0aa7b 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
- if (disallow_fan_support && index >= 8)
+ if (disallow_fan_support && index >= 20)
return 0;
if (disallow_fan_type_call &&
- (index == 9 || index == 12 || index == 15))
+ (index == 21 || index == 25 || index == 28))
return 0;
if (index >= 0 && index <= 1 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index fd47ab4e6892..8d3b1dae31df 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -153,8 +153,44 @@ static int hwmon_thermal_get_temp(void *data, int *temp)
return 0;
}
+static int hwmon_thermal_set_trips(void *data, int low, int high)
+{
+ struct hwmon_thermal_data *tdata = data;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
+ const struct hwmon_chip_info *chip = hwdev->chip;
+ const struct hwmon_channel_info **info = chip->info;
+ unsigned int i;
+ int err;
+
+ if (!chip->ops->write)
+ return 0;
+
+ for (i = 0; info[i] && info[i]->type != hwmon_temp; i++)
+ continue;
+
+ if (!info[i])
+ return 0;
+
+ if (info[i]->config[tdata->index] & HWMON_T_MIN) {
+ err = chip->ops->write(tdata->dev, hwmon_temp,
+ hwmon_temp_min, tdata->index, low);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ if (info[i]->config[tdata->index] & HWMON_T_MAX) {
+ err = chip->ops->write(tdata->dev, hwmon_temp,
+ hwmon_temp_max, tdata->index, high);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return 0;
+}
+
static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
+ .set_trips = hwmon_thermal_set_trips,
};
static void hwmon_thermal_remove_sensor(void *data)
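With set_trips wired up, the thermal core's trip window now reaches the chip through the regular hwmon write callback as hwmon_temp_min/hwmon_temp_max writes, provided the channel declares HWMON_T_MIN/HWMON_T_MAX. A hedged sketch of a chip-side write callback on the receiving end; the mychip names are made up for illustration and are not part of this patch (assumes <linux/hwmon.h>):

    /* hypothetical driver private data: just cache the trip values */
    struct mychip_data {
            long temp_min[2];
            long temp_max[2];
    };

    static int mychip_write(struct device *dev, enum hwmon_sensor_types type,
                            u32 attr, int channel, long val)
    {
            struct mychip_data *data = dev_get_drvdata(dev);

            if (type != hwmon_temp)
                    return -EOPNOTSUPP;

            switch (attr) {
            case hwmon_temp_min:    /* low trip pushed down by hwmon_thermal_set_trips() */
                    data->temp_min[channel] = val;
                    return 0;
            case hwmon_temp_max:    /* high trip */
                    data->temp_max[channel] = val;
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }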
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index c602583d19f3..58d3828e2ec0 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -196,13 +196,11 @@ static inline u32 ina3221_reg_to_interval_us(u16 config)
u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(config);
u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(config);
- u32 samples_idx = INA3221_CONFIG_AVG(config);
- u32 samples = ina3221_avg_samples[samples_idx];
u32 vbus_ct = ina3221_conv_time[vbus_ct_idx];
u32 vsh_ct = ina3221_conv_time[vsh_ct_idx];
/* Calculate total conversion time */
- return channels * (vbus_ct + vsh_ct) * samples;
+ return channels * (vbus_ct + vsh_ct);
}
static inline int ina3221_wait_for_data(struct ina3221_data *ina)
@@ -288,13 +286,14 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
return -ENODATA;
/* Write CONFIG register to trigger a single-shot measurement */
- if (ina->single_shot)
+ if (ina->single_shot) {
regmap_write(ina->regmap, INA3221_CONFIG,
ina->reg_config);
- ret = ina3221_wait_for_data(ina);
- if (ret)
- return ret;
+ ret = ina3221_wait_for_data(ina);
+ if (ret)
+ return ret;
+ }
ret = ina3221_read_value(ina, reg, &regval);
if (ret)
@@ -344,13 +343,14 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
return -ENODATA;
/* Write CONFIG register to trigger a single-shot measurement */
- if (ina->single_shot)
+ if (ina->single_shot) {
regmap_write(ina->regmap, INA3221_CONFIG,
ina->reg_config);
- ret = ina3221_wait_for_data(ina);
- if (ret)
- return ret;
+ ret = ina3221_wait_for_data(ina);
+ if (ret)
+ return ret;
+ }
fallthrough;
case hwmon_curr_crit:
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 40eab3349904..d2a60de5b8de 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -22,10 +22,10 @@
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
#define DRVNAME "lm70"
@@ -148,29 +148,6 @@ static const struct of_device_id lm70_of_ids[] = {
MODULE_DEVICE_TABLE(of, lm70_of_ids);
#endif
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id lm70_acpi_ids[] = {
- {
- .id = "LM000070",
- .driver_data = LM70_CHIP_LM70,
- },
- {
- .id = "TMP00121",
- .driver_data = LM70_CHIP_TMP121,
- },
- {
- .id = "LM000071",
- .driver_data = LM70_CHIP_LM71,
- },
- {
- .id = "LM000074",
- .driver_data = LM70_CHIP_LM74,
- },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, lm70_acpi_ids);
-#endif
-
static int lm70_probe(struct spi_device *spi)
{
struct device *hwmon_dev;
@@ -184,7 +161,7 @@ static int lm70_probe(struct spi_device *spi)
/* signaling is SPI_MODE_0 */
- if (spi->mode & (SPI_CPOL | SPI_CPHA))
+ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
return -EINVAL;
/* NOTE: we assume 8-bit words, and convert to 16 bits manually */
@@ -217,7 +194,6 @@ static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.of_match_table = of_match_ptr(lm70_of_ids),
- .acpi_match_table = ACPI_PTR(lm70_acpi_ids),
},
.id_table = lm70_ids,
.probe = lm70_probe,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e447febd121a..afdbb63237b9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -50,6 +50,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tmp75,
tmp75b,
tmp75c,
+ tmp1075,
};
/**
@@ -293,6 +294,13 @@ static const struct lm75_params device_params[] = {
.clr_mask = 1 << 5, /*not one-shot mode*/
.default_resolution = 12,
.default_sample_time = MSEC_PER_SEC / 12,
+ },
+ [tmp1075] = { /* not one-shot mode, 27.5 ms sample rate */
+ .clr_mask = 1 << 5 | 1 << 6 | 1 << 7,
+ .default_resolution = 12,
+ .default_sample_time = 28,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
}
};
@@ -662,6 +670,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp75", tmp75, },
{ "tmp75b", tmp75b, },
{ "tmp75c", tmp75c, },
+ { "tmp1075", tmp1075, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
@@ -771,6 +780,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.compatible = "ti,tmp75c",
.data = (void *)tmp75c
},
+ {
+ .compatible = "ti,tmp1075",
+ .data = (void *)tmp1075
+ },
{ },
};
MODULE_DEVICE_TABLE(of, lm75_of_match);
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index ac4adb44b224..97ab491d2922 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
- int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
lm80_init_client(client);
/* A few vars need to be filled upon startup */
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
- if (rv < 0)
- return rv;
- data->fan[f_min][0] = rv;
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
- if (rv < 0)
- return rv;
- data->fan[f_min][1] = rv;
+ data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+ data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index ebbfd5f352c0..567b7c521f38 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -465,6 +465,7 @@ enum lm90_temp11_reg_index {
struct lm90_data {
struct i2c_client *client;
+ struct device *hwmon_dev;
u32 channel_config[4];
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[3];
@@ -1028,8 +1029,11 @@ static int lm90_set_temp11(struct lm90_data *data, int index, long val)
int err;
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index <= 2)
+ if (data->kind == lm99 && index <= 2) {
+ /* prevent integer underflow */
+ val = max(val, -128000l);
val -= 16000;
+ }
if (data->kind == adt7461 || data->kind == tmp451)
data->temp11[index] = temp_to_u16_adt7461(data, val);
@@ -1088,8 +1092,11 @@ static int lm90_set_temp8(struct lm90_data *data, int index, long val)
int err;
/* +16 degrees offset for temp2 for the LM99 */
- if (data->kind == lm99 && index == 3)
+ if (data->kind == lm99 && index == 3) {
+ /* prevent integer underflow */
+ val = max(val, -128000l);
val -= 16000;
+ }
if (data->kind == adt7461 || data->kind == tmp451)
data->temp8[index] = temp_to_u8_adt7461(data, val);
@@ -1136,6 +1143,9 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
else
temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
+ /* prevent integer underflow */
+ val = max(val, -128000l);
+
data->temp_hyst = hyst_to_reg(temp - val);
err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
data->temp_hyst);
@@ -1703,6 +1713,13 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
if (data->kind == max6696)
config &= ~0x08;
+ /*
+ * Interrupt is enabled by default on reset, but it may be disabled
+	 * by the bootloader, so unmask it.
+ */
+ if (client->irq)
+ config &= ~0x80;
+
config &= 0xBF; /* run */
lm90_update_confreg(data, config);
@@ -1731,22 +1748,41 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
if ((st & (LM90_STATUS_LLOW | LM90_STATUS_LHIGH | LM90_STATUS_LTHRM)) ||
(st2 & MAX6696_STATUS2_LOT2))
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 1);
+ dev_dbg(&client->dev,
+ "temp%d out of range, please check!\n", 1);
if ((st & (LM90_STATUS_RLOW | LM90_STATUS_RHIGH | LM90_STATUS_RTHRM)) ||
(st2 & MAX6696_STATUS2_ROT2))
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 2);
+ dev_dbg(&client->dev,
+ "temp%d out of range, please check!\n", 2);
if (st & LM90_STATUS_ROPEN)
- dev_warn(&client->dev,
- "temp%d diode open, please check!\n", 2);
+ dev_dbg(&client->dev,
+ "temp%d diode open, please check!\n", 2);
if (st2 & (MAX6696_STATUS2_R2LOW | MAX6696_STATUS2_R2HIGH |
MAX6696_STATUS2_R2THRM | MAX6696_STATUS2_R2OT2))
- dev_warn(&client->dev,
- "temp%d out of range, please check!\n", 3);
+ dev_dbg(&client->dev,
+ "temp%d out of range, please check!\n", 3);
if (st2 & MAX6696_STATUS2_R2OPEN)
- dev_warn(&client->dev,
- "temp%d diode open, please check!\n", 3);
+ dev_dbg(&client->dev,
+ "temp%d diode open, please check!\n", 3);
+
+ if (st & LM90_STATUS_LLOW)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_min, 0);
+ if (st & LM90_STATUS_RLOW)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_min, 1);
+ if (st2 & MAX6696_STATUS2_R2LOW)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_min, 2);
+ if (st & LM90_STATUS_LHIGH)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max, 0);
+ if (st & LM90_STATUS_RHIGH)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max, 1);
+ if (st2 & MAX6696_STATUS2_R2HIGH)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max, 2);
return true;
}
@@ -1904,12 +1940,13 @@ static int lm90_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
+ data->hwmon_dev = hwmon_dev;
+
if (client->irq) {
dev_dbg(dev, "IRQ: %d\n", client->irq);
err = devm_request_threaded_irq(dev, client->irq,
NULL, lm90_irq_thread,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "lm90", client);
+ IRQF_ONESHOT, "lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
return err;
@@ -1941,15 +1978,40 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
lm90_update_confreg(data, data->config | 0x80);
}
} else {
- dev_info(&client->dev, "Everything OK\n");
+ dev_dbg(&client->dev, "Everything OK\n");
}
}
+static int __maybe_unused lm90_suspend(struct device *dev)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused lm90_resume(struct device *dev)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(lm90_pm_ops, lm90_suspend, lm90_resume);
+
static struct i2c_driver lm90_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm90",
.of_match_table = of_match_ptr(lm90_of_match),
+ .pm = &lm90_pm_ops,
},
.probe_new = lm90_probe,
.alert = lm90_alert,
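The clamps added above matter because the LM99 offset is subtracted before the value is converted to the register format; clamping to -128000 m°C first keeps the subtraction well inside the range of long. A small standalone check of the arithmetic, assuming the worst-case input is LONG_MIN:

    #include <limits.h>
    #include <stdio.h>

    static long lm99_adjust(long val)
    {
            if (val < -128000)              /* clamp first, as the patch does */
                    val = -128000;
            return val - 16000;             /* LM99 +16 degC remote offset */
    }

    int main(void)
    {
            /* prints -144000 instead of wrapping around */
            printf("%ld\n", lm99_adjust(LONG_MIN));
            return 0;
    }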
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 4382105bf142..2a4bed0ab226 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
fwnode_for_each_available_child_node(fwnode, child) {
ret = fwnode_property_read_u32(child, "reg", &addr);
- if (ret < 0)
+ if (ret < 0) {
+ fwnode_handle_put(child);
return ret;
+ }
- if (addr > 1)
+ if (addr > 1) {
+ fwnode_handle_put(child);
return -EINVAL;
+ }
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret)
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 062eceb7be0d..613338cbcb17 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -6,7 +6,6 @@
* Copyright (c) 2016, Intel Corporation.
*/
-#include <linux/acpi.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
@@ -133,20 +132,12 @@ static const struct spi_device_id max31722_spi_id[] = {
{"max31723", 0},
{}
};
-
-static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
- {"MAX31722", 0},
- {"MAX31723", 0},
- {}
-};
-
MODULE_DEVICE_TABLE(spi, max31722_spi_id);
static struct spi_driver max31722_driver = {
.driver = {
.name = "max31722",
.pm = &max31722_pm_ops,
- .acpi_match_table = ACPI_PTR(max31722_acpi_id),
},
.probe = max31722_probe,
.remove = max31722_remove,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 86e6c71db685..7e9362f6dc29 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -27,6 +27,7 @@
/* Fan Config register bits */
#define MAX31790_FAN_CFG_RPM_MODE 0x80
+#define MAX31790_FAN_CFG_CTRL_MON 0x10
#define MAX31790_FAN_CFG_TACH_INPUT_EN 0x08
#define MAX31790_FAN_CFG_TACH_INPUT 0x01
@@ -39,6 +40,8 @@
#define FAN_RPM_MIN 120
#define FAN_RPM_MAX 7864320
+#define FAN_COUNT_REG_MAX 0xffe0
+
#define RPM_FROM_REG(reg, sr) (((reg) >> 4) ? \
((60 * (sr) * 8192) / ((reg) >> 4)) : \
FAN_RPM_MAX)
@@ -79,7 +82,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
MAX31790_REG_FAN_FAULT_STATUS1);
if (rv < 0)
goto abort;
- data->fault_status = rv & 0x3F;
+ data->fault_status |= rv & 0x3F;
rv = i2c_smbus_read_byte_data(client,
MAX31790_REG_FAN_FAULT_STATUS2);
@@ -104,7 +107,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
data->tach[NR_CHANNEL + i] = rv;
} else {
rv = i2c_smbus_read_word_swapped(client,
- MAX31790_REG_PWMOUT(i));
+ MAX31790_REG_PWM_DUTY_CYCLE(i));
if (rv < 0)
goto abort;
data->pwm[i] = rv;
@@ -170,8 +173,11 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
switch (attr) {
case hwmon_fan_input:
- sr = get_tach_period(data->fan_dynamics[channel]);
- rpm = RPM_FROM_REG(data->tach[channel], sr);
+ sr = get_tach_period(data->fan_dynamics[channel % NR_CHANNEL]);
+ if (data->tach[channel] == FAN_COUNT_REG_MAX)
+ rpm = 0;
+ else
+ rpm = RPM_FROM_REG(data->tach[channel], sr);
*val = rpm;
return 0;
case hwmon_fan_target:
@@ -180,7 +186,21 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
*val = rpm;
return 0;
case hwmon_fan_fault:
+ mutex_lock(&data->update_lock);
*val = !!(data->fault_status & (1 << channel));
+ data->fault_status &= ~(1 << channel);
+ /*
+ * If a fault bit is set, we need to write into one of the fan
+ * configuration registers to clear it. Note that this also
+ * clears the fault for the companion channel if enabled.
+ */
+ if (*val) {
+ int reg = MAX31790_REG_TARGET_COUNT(channel % NR_CHANNEL);
+
+ i2c_smbus_write_byte_data(data->client, reg,
+ data->target_count[channel % NR_CHANNEL] >> 8);
+ }
+ mutex_unlock(&data->update_lock);
return 0;
default:
return -EOPNOTSUPP;
@@ -271,12 +291,12 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
*val = data->pwm[channel] >> 8;
return 0;
case hwmon_pwm_enable:
- if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
+ if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
+ *val = 0;
+ else if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
*val = 2;
- else if (fan_config & MAX31790_FAN_CFG_TACH_INPUT_EN)
- *val = 1;
else
- *val = 0;
+ *val = 1;
return 0;
default:
return -EOPNOTSUPP;
@@ -299,31 +319,41 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
err = -EINVAL;
break;
}
- data->pwm[channel] = val << 8;
+ data->valid = false;
err = i2c_smbus_write_word_swapped(client,
MAX31790_REG_PWMOUT(channel),
- data->pwm[channel]);
+ val << 8);
break;
case hwmon_pwm_enable:
fan_config = data->fan_config[channel];
if (val == 0) {
- fan_config &= ~(MAX31790_FAN_CFG_TACH_INPUT_EN |
- MAX31790_FAN_CFG_RPM_MODE);
+ fan_config |= MAX31790_FAN_CFG_CTRL_MON;
+ /*
+ * Disable RPM mode; otherwise disabling fan speed
+ * monitoring is not possible.
+ */
+ fan_config &= ~MAX31790_FAN_CFG_RPM_MODE;
} else if (val == 1) {
- fan_config = (fan_config |
- MAX31790_FAN_CFG_TACH_INPUT_EN) &
- ~MAX31790_FAN_CFG_RPM_MODE;
+ fan_config &= ~(MAX31790_FAN_CFG_CTRL_MON | MAX31790_FAN_CFG_RPM_MODE);
} else if (val == 2) {
- fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN |
- MAX31790_FAN_CFG_RPM_MODE;
+ fan_config &= ~MAX31790_FAN_CFG_CTRL_MON;
+ /*
+ * The chip sets MAX31790_FAN_CFG_TACH_INPUT_EN on its
+ * own if MAX31790_FAN_CFG_RPM_MODE is set.
+ * Do it here as well to reflect the actual register
+ * value in the cache.
+ */
+ fan_config |= (MAX31790_FAN_CFG_RPM_MODE | MAX31790_FAN_CFG_TACH_INPUT_EN);
} else {
err = -EINVAL;
break;
}
- data->fan_config[channel] = fan_config;
- err = i2c_smbus_write_byte_data(client,
- MAX31790_REG_FAN_CONFIG(channel),
- fan_config);
+ if (fan_config != data->fan_config[channel]) {
+ err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
+ fan_config);
+ if (!err)
+ data->fan_config[channel] = fan_config;
+ }
break;
default:
err = -EOPNOTSUPP;
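The rework above maps pwm[1-*]_enable onto the new CTRL_MON bit: 0 selects monitoring only (CTRL_MON set), 1 open-loop PWM (both CTRL_MON and RPM_MODE clear) and 2 closed-loop RPM control (RPM_MODE set, with the chip also setting TACH_INPUT_EN on its own). A sketch that restates the read-side mapping as a self-contained helper, for reference only:

    #define MAX31790_FAN_CFG_RPM_MODE   0x80
    #define MAX31790_FAN_CFG_CTRL_MON   0x10

    static int max31790_cfg_to_pwm_enable(unsigned char fan_config)
    {
            if (fan_config & MAX31790_FAN_CFG_CTRL_MON)
                    return 0;       /* fan control disabled, monitoring only */
            if (fan_config & MAX31790_FAN_CFG_RPM_MODE)
                    return 2;       /* closed-loop RPM mode */
            return 1;               /* open-loop PWM mode */
    }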
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8587189c7f15..18fd6f12ca16 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -8,7 +8,6 @@
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <linux/math64.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -17,9 +16,6 @@
#include <linux/platform_data/ntc_thermistor.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/machine.h>
-#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
#include <linux/hwmon.h>
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index f1ac153d0b56..967532afb1c0 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
return rc;
/* limit the maximum rate of polling the OCC */
- if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
+ if (time_after(jiffies, occ->next_update)) {
rc = occ_poll(occ);
- occ->last_update = jiffies;
+ occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
} else {
rc = occ->last_error;
}
@@ -1165,6 +1165,7 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
+ occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
occ_parse_poll_response(occ);
rc = occ_setup_sensor_attrs(occ);
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index 67e6968b8978..e6df719770e8 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -99,7 +99,7 @@ struct occ {
u8 poll_cmd_data; /* to perform OCC poll command */
int (*send_cmd)(struct occ *occ, u8 *cmd);
- unsigned long last_update;
+ unsigned long next_update;
struct mutex lock; /* lock OCC access */
struct device *hwmon;
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 37a5c39784fa..ffb609cee3a4 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -19,9 +19,10 @@ config SENSORS_PMBUS
default y
help
If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
- TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
+ PMBus devices, including but not limited to ADP4000, BMR310, BMR453,
+ BMR454, BMR456, BMR457, BMR458, BMR480, BMR490, BMR491, BMR492,
+ MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012,
+ TPS40400, TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -85,6 +86,15 @@ config SENSORS_IBM_CFFPS
This driver can also be built as a module. If so, the module will
be called ibm-cffps.
+config SENSORS_DPS920AB
+ tristate "Delta DPS920AB Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for Delta
+ DPS920AB Power Supplies.
+
+ This driver can also be built as a module. If so, the module will
+ be called dps920ab.
+
config SENSORS_INSPUR_IPSPS
tristate "INSPUR Power System Power Supply"
help
@@ -248,6 +258,15 @@ config SENSORS_MAX8688
This driver can also be built as a module. If so, the module will
be called max8688.
+config SENSORS_MP2888
+ tristate "MPS MP2888"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2888 Digital, Multi-Phase, Pulse-Width Modulation Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2888.
+
config SENSORS_MP2975
tristate "MPS MP2975"
help
@@ -257,6 +276,15 @@ config SENSORS_MP2975
This driver can also be built as a module. If so, the module will
be called mp2975.
+config SENSORS_PIM4328
+ tristate "Flex PIM4328 and compatibles"
+ help
+ If you say yes here you get hardware monitoring support for Flex
+ PIM4328, PIM4820 and PIM4006 Power Interface Modules.
+
+ This driver can also be built as a module. If so, the module will
+ be called pim4328.
+
config SENSORS_PM6764TR
tristate "ST PM6764TR"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index f8dcc27cd56a..0ed4d596a948 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_BPA_RS600) += bpa-rs600.o
obj-$(CONFIG_SENSORS_FSP_3Y) += fsp-3y.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
+obj-$(CONFIG_SENSORS_DPS920AB) += dps920ab.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR36021) += ir36021.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
@@ -39,3 +41,4 @@ obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
+obj-$(CONFIG_SENSORS_PIM4328) += pim4328.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 980a3850b2f3..d311e0557401 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -611,11 +611,13 @@ static int adm1275_probe(struct i2c_client *client)
tindex = 8;
info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
- PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
- /* Enable VOUT if not enabled (it is disabled by default) */
- if (!(config & ADM1278_VOUT_EN)) {
- config |= ADM1278_VOUT_EN;
+ /* Enable VOUT & TEMP1 if not enabled (disabled by default) */
+ if ((config & (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) !=
+ (ADM1278_VOUT_EN | ADM1278_TEMP1_EN)) {
+ config |= ADM1278_VOUT_EN | ADM1278_TEMP1_EN;
ret = i2c_smbus_write_byte_data(client,
ADM1275_PMON_CONFIG,
config);
@@ -625,10 +627,6 @@ static int adm1275_probe(struct i2c_client *client)
return -ENODEV;
}
}
-
- if (config & ADM1278_TEMP1_EN)
- info->func[0] |=
- PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
if (config & ADM1278_VIN_EN)
info->func[0] |= PMBUS_HAVE_VIN;
break;
diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
index f6558ee9dec3..2be69fedfa36 100644
--- a/drivers/hwmon/pmbus/bpa-rs600.c
+++ b/drivers/hwmon/pmbus/bpa-rs600.c
@@ -46,6 +46,32 @@ static int bpa_rs600_read_byte_data(struct i2c_client *client, int page, int reg
return ret;
}
+/*
+ * The BPA-RS600 violates the PMBus spec. Specifically it treats the
+ * mantissa as unsigned. Deal with this here to allow the PMBus core
+ * to work with correctly encoded data.
+ */
+static int bpa_rs600_read_vin(struct i2c_client *client)
+{
+ int ret, exponent, mantissa;
+
+ ret = pmbus_read_word_data(client, 0, 0xff, PMBUS_READ_VIN);
+ if (ret < 0)
+ return ret;
+
+ if (ret & BIT(10)) {
+ exponent = ret >> 11;
+ mantissa = ret & 0x7ff;
+
+ exponent++;
+ mantissa >>= 1;
+
+ ret = (exponent << 11) | mantissa;
+ }
+
+ return ret;
+}
+
static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int phase, int reg)
{
int ret;
@@ -85,6 +111,9 @@ static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int pha
/* These commands return data but it is invalid/un-documented */
ret = -ENXIO;
break;
+ case PMBUS_READ_VIN:
+ ret = bpa_rs600_read_vin(client);
+ break;
default:
if (reg >= PMBUS_VIRT_BASE)
ret = -ENXIO;
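As a worked example of the fixup above (values chosen for illustration): a READ_VIN word with exponent -3 and raw mantissa 0x500 would decode to -96 V if the mantissa were taken as signed; halving the mantissa to 0x280 and bumping the exponent to -2 preserves the intended +160 V. A standalone check of the same arithmetic:

    #include <stdio.h>

    static int bpa_rs600_fixup(int raw)
    {
            int exponent = raw >> 11;       /* device's 5-bit exponent field */
            int mantissa = raw & 0x7ff;

            if (raw & (1 << 10)) {          /* top mantissa bit is data, not sign */
                    exponent++;
                    mantissa >>= 1;
                    raw = (exponent << 11) | mantissa;
            }
            return raw;
    }

    int main(void)
    {
            int raw = (0x1d << 11) | 0x500; /* exponent -3 (0x1d), mantissa 0x500 */

            /* prints 0xf280: exponent -2 (0x1e), mantissa 0x280, i.e. 160 V */
            printf("0x%04x\n", bpa_rs600_fixup(raw) & 0xffff);
            return 0;
    }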
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
new file mode 100644
index 000000000000..d3941f6eb29a
--- /dev/null
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Delta DPS920AB PSU
+ *
+ * Copyright (C) 2021 Delta Networks, Inc.
+ * Copyright (C) 2021 Sartura Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+struct dps920ab_data {
+ char *mfr_model;
+ char *mfr_id;
+};
+
+static int dps920ab_read_word_data(struct i2c_client *client, int page, int phase, int reg)
+{
+ /*
+	 * Mask commands which are not supported. The PSU
+	 * advertises that all features are supported, but in
+	 * reality that is not the case. Enable only those
+	 * that the datasheet confirms.
+ */
+ switch (reg) {
+ case PMBUS_FAN_COMMAND_1:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_STATUS_WORD:
+ case PMBUS_READ_VIN:
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_VOUT:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_READ_TEMPERATURE_2:
+ case PMBUS_READ_TEMPERATURE_3:
+ case PMBUS_READ_FAN_SPEED_1:
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_PIN:
+ case PMBUS_MFR_VOUT_MIN:
+ case PMBUS_MFR_VOUT_MAX:
+ case PMBUS_MFR_IOUT_MAX:
+ case PMBUS_MFR_POUT_MAX:
+ return pmbus_read_word_data(client, page, phase, reg);
+ default:
+ return -ENXIO;
+ }
+}
+
+static int dps920ab_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ /*
+ * This masks commands which are not supported.
+ * PSU only has one R/W register and that is
+ * for the fan.
+ */
+ switch (reg) {
+ case PMBUS_FAN_COMMAND_1:
+ return pmbus_write_word_data(client, page, reg, word);
+ default:
+ return -EACCES;
+ }
+}
+
+static struct pmbus_driver_info dps920ab_info = {
+ .pages = 1,
+
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_FAN] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = dps920ab_read_word_data,
+ .write_word_data = dps920ab_write_word_data,
+};
+
+static int dps920ab_mfr_id_show(struct seq_file *s, void *data)
+{
+ struct dps920ab_data *priv = s->private;
+
+ seq_printf(s, "%s\n", priv->mfr_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dps920ab_mfr_id);
+
+static int dps920ab_mfr_model_show(struct seq_file *s, void *data)
+{
+ struct dps920ab_data *priv = s->private;
+
+ seq_printf(s, "%s\n", priv->mfr_model);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dps920ab_mfr_model);
+
+static void dps920ab_init_debugfs(struct dps920ab_data *data, struct i2c_client *client)
+{
+ struct dentry *debugfs_dir;
+ struct dentry *root;
+
+ root = pmbus_get_debugfs_dir(client);
+ if (!root)
+ return;
+
+ debugfs_dir = debugfs_create_dir(client->name, root);
+
+ debugfs_create_file("mfr_id",
+ 0400,
+ debugfs_dir,
+ data,
+ &dps920ab_mfr_id_fops);
+
+ debugfs_create_file("mfr_model",
+ 0400,
+ debugfs_dir,
+ data,
+ &dps920ab_mfr_model_fops);
+}
+
+static int dps920ab_probe(struct i2c_client *client)
+{
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct dps920ab_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+
+ buf[ret] = '\0';
+ if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+ buf[ret] = '\0';
+ dev_err(&client->dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ return -ENODEV;
+ }
+ data->mfr_id = devm_kstrdup(&client->dev, buf, GFP_KERNEL);
+ if (!data->mfr_id)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+
+ buf[ret] = '\0';
+ if (ret != 11 || strncmp(buf, "DPS-920AB", 9)) {
+ dev_err(&client->dev, "Unsupported Manufacturer Model '%s'\n", buf);
+ return -ENODEV;
+ }
+ data->mfr_model = devm_kstrdup(&client->dev, buf, GFP_KERNEL);
+ if (!data->mfr_model)
+ return -ENOMEM;
+
+ ret = pmbus_do_probe(client, &dps920ab_info);
+ if (ret)
+ return ret;
+
+ dps920ab_init_debugfs(data, client);
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused dps920ab_of_match[] = {
+ { .compatible = "delta,dps920ab", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, dps920ab_of_match);
+
+static struct i2c_driver dps920ab_driver = {
+ .driver = {
+ .name = "dps920ab",
+ .of_match_table = of_match_ptr(dps920ab_of_match),
+ },
+ .probe_new = dps920ab_probe,
+};
+
+module_i2c_driver(dps920ab_driver);
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("PMBus driver for Delta DPS920AB PSU");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
index b177987286ae..aec294cc72d1 100644
--- a/drivers/hwmon/pmbus/fsp-3y.c
+++ b/drivers/hwmon/pmbus/fsp-3y.c
@@ -37,6 +37,8 @@ struct fsp3y_data {
struct pmbus_driver_info info;
int chip;
int page;
+
+ bool vout_linear_11;
};
#define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info)
@@ -57,7 +59,7 @@ static int page_log_to_page_real(int page_log, enum chips chip)
case YH5151E_PAGE_12V_LOG:
return YH5151E_PAGE_12V_REAL;
case YH5151E_PAGE_5V_LOG:
- return YH5151E_PAGE_5V_LOG;
+ return YH5151E_PAGE_5V_REAL;
case YH5151E_PAGE_3V3_LOG:
return YH5151E_PAGE_3V3_REAL;
}
@@ -103,8 +105,16 @@ static int set_page(struct i2c_client *client, int page_log)
static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct fsp3y_data *data = to_fsp3y_data(info);
int rv;
+ /*
+ * Inject an exponent for non-compliant YH5151-E.
+ */
+ if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE)
+ return 0x1A;
+
rv = set_page(client, page);
if (rv < 0)
return rv;
@@ -114,6 +124,8 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase, int reg)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct fsp3y_data *data = to_fsp3y_data(info);
int rv;
/*
@@ -144,7 +156,17 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
if (rv < 0)
return rv;
- return i2c_smbus_read_word_data(client, reg);
+ rv = i2c_smbus_read_word_data(client, reg);
+ if (rv < 0)
+ return rv;
+
+ /*
+ * Handle YH-5151E non-compliant linear11 vout voltage.
+ */
+ if (data->vout_linear_11 && reg == PMBUS_READ_VOUT)
+ rv = sign_extend32(rv, 10) & 0xffff;
+
+ return rv;
}
static struct pmbus_driver_info fsp3y_info[] = {
@@ -233,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client)
data->info = fsp3y_info[data->chip];
+ /*
+ * YH-5151E sometimes reports vout in linear11 and sometimes in
+ * linear16. This depends on the exact individual piece of hardware. One
+ * YH-5151E can use linear16 and another might use linear11 instead.
+ *
+ * The format can be recognized by reading VOUT_MODE - if it doesn't
+ * report a valid exponent, then vout uses linear11. Otherwise, the
+ * device is compliant and uses linear16.
+ */
+ data->vout_linear_11 = false;
+ if (data->chip == yh5151e) {
+ rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+ if (rv < 0)
+ return rv;
+
+ if (rv == 0xFF)
+ data->vout_linear_11 = true;
+ }
+
return pmbus_do_probe(client, &data->info);
}
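To make the linear11 path above concrete (numbers are illustrative): the injected VOUT_MODE of 0x1A is linear mode with a 5-bit two's-complement exponent of -6, and the READ_VOUT fixup keeps only the sign-extended 11-bit mantissa. A raw word of 0xd300 (upper exponent bits already -6, mantissa 0x300 = 768) therefore decodes as 768 * 2^-6 = 12 V. A standalone sketch of that decode:

    #include <stdio.h>

    int main(void)
    {
            unsigned int raw = 0xd300;      /* linear11 word from the PSU */
            int mantissa = raw & 0x7ff;     /* keep bits 10:0 ... */
            int exponent = -6;              /* ... and use the injected VOUT_MODE 0x1A */

            if (raw & 0x400)                /* sign-extend the 11-bit mantissa */
                    mantissa -= 0x800;

            printf("vout = %.3f V\n", mantissa / (double)(1 << -exponent));  /* 12.000 */
            return 0;
    }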
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 40597a9e799f..1a8caff1ac5f 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
info->read_word_data = raa_dmpvr2_read_word_data;
break;
case raa_dmpvr2_2rail_nontc:
- info->func[0] &= ~PMBUS_HAVE_TEMP;
- info->func[1] &= ~PMBUS_HAVE_TEMP;
+ info->func[0] &= ~PMBUS_HAVE_TEMP3;
+ info->func[1] &= ~PMBUS_HAVE_TEMP3;
fallthrough;
case raa_dmpvr2_2rail:
info->pages = 2;
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
new file mode 100644
index 000000000000..8ecd4adfef40
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers
+ *
+ * Copyright (C) 2020 Nvidia Technologies Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+/* Vendor specific registers. */
+#define MP2888_MFR_SYS_CONFIG 0x44
+#define MP2888_MFR_READ_CS1_2 0x73
+#define MP2888_MFR_READ_CS3_4 0x74
+#define MP2888_MFR_READ_CS5_6 0x75
+#define MP2888_MFR_READ_CS7_8 0x76
+#define MP2888_MFR_READ_CS9_10 0x77
+#define MP2888_MFR_VR_CONFIG1 0xe1
+
+#define MP2888_TOTAL_CURRENT_RESOLUTION BIT(3)
+#define MP2888_PHASE_CURRENT_RESOLUTION BIT(4)
+#define MP2888_DRMOS_KCS GENMASK(2, 0)
+#define MP2888_TEMP_UNIT 10
+#define MP2888_MAX_PHASE 10
+
+struct mp2888_data {
+ struct pmbus_driver_info info;
+ int total_curr_resolution;
+ int phase_curr_resolution;
+ int curr_sense_gain;
+};
+
+#define to_mp2888_data(x) container_of(x, struct mp2888_data, info)
+
+static int mp2888_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /* Enforce VOUT direct format. */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int
+mp2888_current_sense_gain_and_resolution_get(struct i2c_client *client, struct mp2888_data *data)
+{
+ int ret;
+
+ /*
+	 * Obtain the DrMOS current sense gain of the power stage from the
+	 * MP2888_MFR_SYS_CONFIG register, bits 0-2. The value is selected as below:
+ * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A. Other
+ * values are reserved.
+ */
+ ret = i2c_smbus_read_word_data(client, MP2888_MFR_SYS_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & MP2888_DRMOS_KCS) {
+ case 0:
+ data->curr_sense_gain = 85;
+ break;
+ case 1:
+ data->curr_sense_gain = 97;
+ break;
+ case 2:
+ data->curr_sense_gain = 100;
+ break;
+ case 3:
+ data->curr_sense_gain = 50;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+	 * Obtain the resolution selector for total and phase current report and protection.
+	 * 0: original resolution; 1: half resolution (in which case the phase current value
+	 * should be doubled).
+ */
+ data->total_curr_resolution = (ret & MP2888_TOTAL_CURRENT_RESOLUTION) >> 3;
+ data->phase_curr_resolution = (ret & MP2888_PHASE_CURRENT_RESOLUTION) >> 4;
+
+ return 0;
+}
+
+static int
+mp2888_read_phase(struct i2c_client *client, struct mp2888_data *data, int page, int phase, u8 reg)
+{
+ int ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!((phase + 1) % 2))
+ ret >>= 8;
+ ret &= 0xff;
+
+ /*
+	 * Output value is calculated as: (READ_CSx / 80 - 1.23) / (Kcs * Rcs)
+ * where:
+ * - Kcs is the DrMOS current sense gain of power stage, which is obtained from the
+ * register MP2888_MFR_VR_CONFIG1, bits 13-12 with the following selection of DrMOS
+ * (data->curr_sense_gain):
+ * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A.
+	 * - Rcs is the internal phase current sense resistor, which depends on the hardware
+	 *   assembly. By default it is 1kΩ; in case of a different assembly, the user should
+	 *   scale this parameter by dividing it by Rcs.
+	 *   If the phase current resolution bit is set to 1, the READ_CSx value should be doubled.
+	 *   Note that the per-phase current sensing provided by the device is not accurate,
+	 *   since the sampled value has a large deviation, especially under light load.
+ */
+ ret = DIV_ROUND_CLOSEST(ret * 100 - 9800, data->curr_sense_gain);
+ ret = (data->phase_curr_resolution) ? ret * 2 : ret;
+ /* Scale according to total current resolution. */
+ ret = (data->total_curr_resolution) ? ret * 8 : ret * 4;
+ return ret;
+}
+
+static int
+mp2888_read_phases(struct i2c_client *client, struct mp2888_data *data, int page, int phase)
+{
+ int ret;
+
+ switch (phase) {
+ case 0 ... 1:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS1_2);
+ break;
+ case 2 ... 3:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS3_4);
+ break;
+ case 4 ... 5:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS5_6);
+ break;
+ case 6 ... 7:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS7_8);
+ break;
+ case 8 ... 9:
+ ret = mp2888_read_phase(client, data, page, phase, MP2888_MFR_READ_CS9_10);
+ break;
+ default:
+ return -ENODATA;
+ }
+ return ret;
+}
+
+static int mp2888_read_word_data(struct i2c_client *client, int page, int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2888_data *data = to_mp2888_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_READ_VIN:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * READ_VIN requires fixup to scale it to linear11 format. Register data format
+ * provides 10 bits for mantissa and 6 bits for exponent. Bits 15:10 are set with
+ * the fixed value 111011b.
+ */
+ ret = (ret & GENMASK(9, 0)) | ((ret & GENMASK(31, 10)) << 1);
+ break;
+ case PMBUS_OT_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+		 * The chip reports limits in degrees C, but the actual temperature is in tenths
+		 * of a degree C - scaling is needed to match both.
+ */
+ ret *= MP2888_TEMP_UNIT;
+ break;
+ case PMBUS_READ_IOUT:
+ if (phase != 0xff)
+ return mp2888_read_phases(client, data, page, phase);
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+ * READ_IOUT register has unused bits 15:12 with fixed value 1110b. Clear these
+ * bits and scale with total current resolution. Data is provided in direct format.
+ */
+ ret &= GENMASK(11, 0);
+ ret = data->total_curr_resolution ? ret * 2 : ret;
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ ret &= GENMASK(9, 0);
+ /*
+		 * The chip reports limits with a resolution of 1 A, or 2 A if the total current
+		 * resolution bit is set to 1, while the actual current is reported with 0.25 A
+		 * or 0.5 A resolution respectively. Scaling is needed to match both.
+ */
+ ret = data->total_curr_resolution ? ret * 8 : ret * 4;
+ break;
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_PIN:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ ret = data->total_curr_resolution ? ret * 2 : ret;
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+		 * The chip reports limits with a resolution of 1 W, or 2 W if the total current
+		 * resolution bit is set to 1, while the actual power is reported with 0.5 W or
+		 * 1 W resolution respectively. Scaling is needed to match both.
+ */
+ ret = data->total_curr_resolution ? ret * 4 : ret * 2;
+ break;
+ /*
+	 * The registers below are either not implemented by the device or not implemented
+	 * according to the spec. Skip all of them to avoid exposing non-relevant inputs to sysfs.
+ */
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_UT_WARN_LIMIT:
+ case PMBUS_UT_FAULT_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_IOUT_OC_LV_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_POUT_MAX:
+ case PMBUS_IOUT_UC_FAULT_LIMIT:
+ case PMBUS_POUT_OP_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ case PMBUS_MFR_VIN_MIN:
+ case PMBUS_MFR_VOUT_MIN:
+ case PMBUS_MFR_VIN_MAX:
+ case PMBUS_MFR_VOUT_MAX:
+ case PMBUS_MFR_IIN_MAX:
+ case PMBUS_MFR_IOUT_MAX:
+ case PMBUS_MFR_PIN_MAX:
+ case PMBUS_MFR_POUT_MAX:
+ case PMBUS_MFR_MAX_TEMP_1:
+ return -ENXIO;
+ default:
+ return -ENODATA;
+ }
+
+ return ret;
+}
+
+static int mp2888_write_word_data(struct i2c_client *client, int page, int reg, u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2888_data *data = to_mp2888_data(info);
+
+ switch (reg) {
+ case PMBUS_OT_WARN_LIMIT:
+ word = DIV_ROUND_CLOSEST(word, MP2888_TEMP_UNIT);
+ /* Drop unused bits 15:8. */
+ word = clamp_val(word, 0, GENMASK(7, 0));
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+		/* Fix limit according to total current resolution. */
+ word = data->total_curr_resolution ? DIV_ROUND_CLOSEST(word, 8) :
+ DIV_ROUND_CLOSEST(word, 4);
+ /* Drop unused bits 15:10. */
+ word = clamp_val(word, 0, GENMASK(9, 0));
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+		/* Fix limit according to total current resolution. */
+ word = data->total_curr_resolution ? DIV_ROUND_CLOSEST(word, 4) :
+ DIV_ROUND_CLOSEST(word, 2);
+ /* Drop unused bits 15:10. */
+ word = clamp_val(word, 0, GENMASK(9, 0));
+ break;
+ default:
+ return -ENODATA;
+ }
+ return pmbus_write_word_data(client, page, reg, word);
+}
+
+static int
+mp2888_identify_multiphase(struct i2c_client *client, struct mp2888_data *data,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify multiphase number - could be from 1 to 10. */
+ ret = i2c_smbus_read_word_data(client, MP2888_MFR_VR_CONFIG1);
+ if (ret <= 0)
+ return ret;
+
+ info->phases[0] = ret & GENMASK(3, 0);
+
+ /*
+	 * The device provides a total of 10 PWM pins and can be configured for a different
+	 * phase count per rail.
+ */
+ if (info->phases[0] > MP2888_MAX_PHASE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct pmbus_driver_info mp2888_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 1,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 4,
+ .m[PSC_POWER] = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_PHASE_VIRTUAL,
+ .pfunc[0] = PMBUS_HAVE_IOUT,
+ .pfunc[1] = PMBUS_HAVE_IOUT,
+ .pfunc[2] = PMBUS_HAVE_IOUT,
+ .pfunc[3] = PMBUS_HAVE_IOUT,
+ .pfunc[4] = PMBUS_HAVE_IOUT,
+ .pfunc[5] = PMBUS_HAVE_IOUT,
+ .pfunc[6] = PMBUS_HAVE_IOUT,
+ .pfunc[7] = PMBUS_HAVE_IOUT,
+ .pfunc[8] = PMBUS_HAVE_IOUT,
+ .pfunc[9] = PMBUS_HAVE_IOUT,
+ .read_byte_data = mp2888_read_byte_data,
+ .read_word_data = mp2888_read_word_data,
+ .write_word_data = mp2888_write_word_data,
+};
+
+static int mp2888_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp2888_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2888_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2888_info, sizeof(*info));
+ info = &data->info;
+
+ /* Identify multiphase configuration. */
+ ret = mp2888_identify_multiphase(client, data, info);
+ if (ret)
+ return ret;
+
+ /* Obtain current sense gain of power stage and current resolution. */
+ ret = mp2888_current_sense_gain_and_resolution_get(client, data);
+ if (ret)
+ return ret;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp2888_id[] = {
+ {"mp2888", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mp2888_id);
+
+static const struct of_device_id __maybe_unused mp2888_of_match[] = {
+ {.compatible = "mps,mp2888"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2888_of_match);
+
+static struct i2c_driver mp2888_driver = {
+ .driver = {
+ .name = "mp2888",
+ .of_match_table = of_match_ptr(mp2888_of_match),
+ },
+ .probe_new = mp2888_probe,
+ .id_table = mp2888_id,
+};
+
+module_i2c_driver(mp2888_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2888 device");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/pim4328.c b/drivers/hwmon/pmbus/pim4328.c
new file mode 100644
index 000000000000..273ff6e57654
--- /dev/null
+++ b/drivers/hwmon/pmbus/pim4328.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for PIM4006, PIM4328 and PIM4820
+ *
+ * Copyright (c) 2021 Flextronics International Sweden AB
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/slab.h>
+#include "pmbus.h"
+
+enum chips { pim4006, pim4328, pim4820 };
+
+struct pim4328_data {
+ enum chips id;
+ struct pmbus_driver_info info;
+};
+
+#define to_pim4328_data(x) container_of(x, struct pim4328_data, info)
+
+/* PIM4006 and PIM4328 */
+#define PIM4328_MFR_READ_VINA 0xd3
+#define PIM4328_MFR_READ_VINB 0xd4
+
+/* PIM4006 */
+#define PIM4328_MFR_READ_IINA 0xd6
+#define PIM4328_MFR_READ_IINB 0xd7
+#define PIM4328_MFR_FET_CHECKSTATUS 0xd9
+
+/* PIM4328 */
+#define PIM4328_MFR_STATUS_BITS 0xd5
+
+/* PIM4820 */
+#define PIM4328_MFR_READ_STATUS 0xd0
+
+static const struct i2c_device_id pim4328_id[] = {
+ {"bmr455", pim4328},
+ {"pim4006", pim4006},
+ {"pim4106", pim4006},
+ {"pim4206", pim4006},
+ {"pim4306", pim4006},
+ {"pim4328", pim4328},
+ {"pim4406", pim4006},
+ {"pim4820", pim4820},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pim4328_id);
+
+static int pim4328_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ if (page > 0)
+ return -ENXIO;
+
+ if (phase == 0xff)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_READ_VIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ phase == 0 ? PIM4328_MFR_READ_VINA
+ : PIM4328_MFR_READ_VINB);
+ break;
+ case PMBUS_READ_IIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ phase == 0 ? PIM4328_MFR_READ_IINA
+ : PIM4328_MFR_READ_IINB);
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ return ret;
+}
+
+static int pim4328_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct pim4328_data *data = to_pim4328_data(info);
+ int ret, status;
+
+ if (page > 0)
+ return -ENXIO;
+
+ switch (reg) {
+ case PMBUS_STATUS_BYTE:
+ ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ if (ret < 0)
+ return ret;
+ if (data->id == pim4006) {
+ status = pmbus_read_word_data(client, page, 0xff,
+ PIM4328_MFR_FET_CHECKSTATUS);
+ if (status < 0)
+ return status;
+ if (status & 0x0630) /* Input UV */
+ ret |= PB_STATUS_VIN_UV;
+ } else if (data->id == pim4328) {
+ status = pmbus_read_byte_data(client, page,
+ PIM4328_MFR_STATUS_BITS);
+ if (status < 0)
+ return status;
+ if (status & 0x04) /* Input UV */
+ ret |= PB_STATUS_VIN_UV;
+ if (status & 0x40) /* Output UV */
+ ret |= PB_STATUS_NONE_ABOVE;
+ } else if (data->id == pim4820) {
+ status = pmbus_read_byte_data(client, page,
+ PIM4328_MFR_READ_STATUS);
+ if (status < 0)
+ return status;
+ if (status & 0x05) /* Input OV or OC */
+ ret |= PB_STATUS_NONE_ABOVE;
+ if (status & 0x1a) /* Input UV */
+ ret |= PB_STATUS_VIN_UV;
+ if (status & 0x40) /* OT */
+ ret |= PB_STATUS_TEMPERATURE;
+ }
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ return ret;
+}
+
+static int pim4328_probe(struct i2c_client *client)
+{
+ int status;
+ u8 device_id[I2C_SMBUS_BLOCK_MAX + 1];
+ const struct i2c_device_id *mid;
+ struct pim4328_data *data;
+ struct pmbus_driver_info *info;
+ struct pmbus_platform_data *pdata;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct pim4328_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
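+ /* Read the PMBUS_MFR_MODEL string and match it against the supported device list. */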
+ status = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, device_id);
+ if (status < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return status;
+ }
+ for (mid = pim4328_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, device_id, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
+ }
+
+ if (strcmp(client->name, mid->name))
+ dev_notice(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ client->name, mid->name);
+
+ data->id = mid->driver_data;
+ info = &data->info;
+ info->pages = 1;
+ info->read_byte_data = pim4328_read_byte_data;
+ info->read_word_data = pim4328_read_word_data;
+
+ pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ dev->platform_data = pdata;
+ pdata->flags = PMBUS_NO_CAPABILITY | PMBUS_NO_WRITE_PROTECT;
+
+ switch (data->id) {
+ case pim4006:
+ info->phases[0] = 2;
+ info->func[0] = PMBUS_PHASE_VIRTUAL | PMBUS_HAVE_VIN
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT;
+ info->pfunc[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN;
+ info->pfunc[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN;
+ break;
+ case pim4328:
+ info->phases[0] = 2;
+ info->func[0] = PMBUS_PHASE_VIRTUAL
+ | PMBUS_HAVE_VCAP | PMBUS_HAVE_VIN
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT;
+ info->pfunc[0] = PMBUS_HAVE_VIN;
+ info->pfunc[1] = PMBUS_HAVE_VIN;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
+ info->format[PSC_CURRENT_OUT] = direct;
+ pdata->flags |= PMBUS_USE_COEFFICIENTS_CMD;
+ break;
+ case pim4820:
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_IIN;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
+ info->format[PSC_CURRENT_IN] = direct;
+ pdata->flags |= PMBUS_USE_COEFFICIENTS_CMD;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return pmbus_do_probe(client, info);
+}
+
+static struct i2c_driver pim4328_driver = {
+ .driver = {
+ .name = "pim4328",
+ },
+ .probe_new = pim4328_probe,
+ .id_table = pim4328_id,
+};
+
+module_i2c_driver(pim4328_driver);
+
+MODULE_AUTHOR("Erik Rosen <erik.rosen@metormote.com>");
+MODULE_DESCRIPTION("PMBus driver for PIM4006, PIM4328, PIM4820 power interface modules");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 618c377664c4..d0d386990af5 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -173,13 +173,13 @@ static int pmbus_probe(struct i2c_client *client)
return -ENOMEM;
device_info = (struct pmbus_device_info *)i2c_match_id(pmbus_id, client)->driver_data;
- if (device_info->flags & PMBUS_SKIP_STATUS_CHECK) {
+ if (device_info->flags) {
pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->flags = PMBUS_SKIP_STATUS_CHECK;
+ pdata->flags = device_info->flags;
}
info->pages = device_info->pages;
@@ -193,22 +193,37 @@ static const struct pmbus_device_info pmbus_info_one = {
.pages = 1,
.flags = 0
};
+
static const struct pmbus_device_info pmbus_info_zero = {
.pages = 0,
.flags = 0
};
+
static const struct pmbus_device_info pmbus_info_one_skip = {
.pages = 1,
.flags = PMBUS_SKIP_STATUS_CHECK
};
+static const struct pmbus_device_info pmbus_info_one_status = {
+ .pages = 1,
+ .flags = PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+};
+
/*
* Use driver_data to set the number of pages supported by the chip.
*/
static const struct i2c_device_id pmbus_id[] = {
{"adp4000", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr310", (kernel_ulong_t)&pmbus_info_one_status},
{"bmr453", (kernel_ulong_t)&pmbus_info_one},
{"bmr454", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr456", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr457", (kernel_ulong_t)&pmbus_info_one},
+ {"bmr458", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr480", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr490", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr491", (kernel_ulong_t)&pmbus_info_one_status},
+ {"bmr492", (kernel_ulong_t)&pmbus_info_one},
{"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 3968924f8533..e0aa8aa46d8c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -375,7 +375,7 @@ enum pmbus_sensor_classes {
};
#define PMBUS_PAGES 32 /* Per PMBus specification */
-#define PMBUS_PHASES 8 /* Maximum number of phases per page */
+#define PMBUS_PHASES 10 /* Maximum number of phases per page */
/* Functionality bit mask */
#define PMBUS_HAVE_VIN BIT(0)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index bbd745178147..776ee2237be2 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -523,6 +523,8 @@ static bool pmbus_check_register(struct i2c_client *client,
rv = func(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
rv = pmbus_check_status_cml(client);
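+ /*
+ * Some devices need STATUS to be read after a failed register check
+ * before they accept further commands; the
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK flag requests that read.
+ */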
+ if (rv < 0 && (data->flags & PMBUS_READ_STATUS_AFTER_FAILED_CHECK))
+ data->read_status(client, -1);
pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
@@ -1327,14 +1329,14 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
pages = paged ? info->pages : 1;
for (page = 0; page < pages; page++) {
- if (!(info->func[page] & attrs->func))
- continue;
- ret = pmbus_add_sensor_attrs_one(client, data, info,
- name, index, page,
- 0xff, attrs, paged);
- if (ret)
- return ret;
- index++;
+ if (info->func[page] & attrs->func) {
+ ret = pmbus_add_sensor_attrs_one(client, data, info,
+ name, index, page,
+ 0xff, attrs, paged);
+ if (ret)
+ return ret;
+ index++;
+ }
if (info->phases[page]) {
int phase;
@@ -2140,6 +2142,111 @@ static int pmbus_find_attributes(struct i2c_client *client,
}
/*
+ * The pmbus_class_attr_map structure maps one sensor class to
+ * its corresponding sensor attributes array.
+ */
+struct pmbus_class_attr_map {
+ enum pmbus_sensor_classes class;
+ int nattr;
+ const struct pmbus_sensor_attr *attr;
+};
+
+static const struct pmbus_class_attr_map class_attr_map[] = {
+ {
+ .class = PSC_VOLTAGE_IN,
+ .attr = voltage_attributes,
+ .nattr = ARRAY_SIZE(voltage_attributes),
+ }, {
+ .class = PSC_VOLTAGE_OUT,
+ .attr = voltage_attributes,
+ .nattr = ARRAY_SIZE(voltage_attributes),
+ }, {
+ .class = PSC_CURRENT_IN,
+ .attr = current_attributes,
+ .nattr = ARRAY_SIZE(current_attributes),
+ }, {
+ .class = PSC_CURRENT_OUT,
+ .attr = current_attributes,
+ .nattr = ARRAY_SIZE(current_attributes),
+ }, {
+ .class = PSC_POWER,
+ .attr = power_attributes,
+ .nattr = ARRAY_SIZE(power_attributes),
+ }, {
+ .class = PSC_TEMPERATURE,
+ .attr = temp_attributes,
+ .nattr = ARRAY_SIZE(temp_attributes),
+ }
+};
+
+/*
+ * Read the coefficients for direct mode.
+ */
+static int pmbus_read_coefficients(struct i2c_client *client,
+ struct pmbus_driver_info *info,
+ const struct pmbus_sensor_attr *attr)
+{
+ int rv;
+ union i2c_smbus_data data;
+ enum pmbus_sensor_classes class = attr->class;
+ s8 R;
+ s16 m, b;
+
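+ /*
+ * COEFFICIENTS is a block write-block read process call: send the
+ * command code of interest plus 0x01 to request the read (decode)
+ * coefficients, and receive a 5-byte block holding m (2 bytes),
+ * b (2 bytes) and R (1 byte), per the PMBus specification.
+ */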
+ data.block[0] = 2;
+ data.block[1] = attr->reg;
+ data.block[2] = 0x01;
+
+ rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
+ I2C_SMBUS_BLOCK_PROC_CALL, &data);
+
+ if (rv < 0)
+ return rv;
+
+ if (data.block[0] != 5)
+ return -EIO;
+
+ m = data.block[1] | (data.block[2] << 8);
+ b = data.block[3] | (data.block[4] << 8);
+ R = data.block[5];
+ info->m[class] = m;
+ info->b[class] = b;
+ info->R[class] = R;
+
+ return rv;
+}
+
+static int pmbus_init_coefficients(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int i, n, ret = -EINVAL;
+ const struct pmbus_class_attr_map *map;
+ const struct pmbus_sensor_attr *attr;
+
+ for (i = 0; i < ARRAY_SIZE(class_attr_map); i++) {
+ map = &class_attr_map[i];
+ if (info->format[map->class] != direct)
+ continue;
+ for (n = 0; n < map->nattr; n++) {
+ attr = &map->attr[n];
+ if (map->class != attr->class)
+ continue;
+ ret = pmbus_read_coefficients(client, info, attr);
+ if (ret >= 0)
+ break;
+ }
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "No coefficients found for sensor class %d\n",
+ map->class);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Identify chip parameters.
* This function is called for all chips.
*/
@@ -2214,11 +2321,14 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->has_status_word = true;
}
- /* Enable PEC if the controller supports it */
+ /* Enable PEC if both the controller and the bus support it */
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
- client->flags |= I2C_CLIENT_PEC;
+ if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) {
+ client->flags |= I2C_CLIENT_PEC;
+ }
+ }
}
/*
@@ -2226,9 +2336,11 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* faults, and we should not try it. Also, in that case, writes into
* limit registers need to be disabled.
*/
- ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
- if (ret > 0 && (ret & PB_WP_ANY))
- data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) {
+ ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ if (ret > 0 && (ret & PB_WP_ANY))
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ }
if (data->info->pages)
pmbus_clear_faults(client);
@@ -2255,6 +2367,17 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return ret;
}
}
+
+ if (data->flags & PMBUS_USE_COEFFICIENTS_CMD) {
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL))
+ return -ENODEV;
+
+ ret = pmbus_init_coefficients(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
index b6e8b20466f1..fa298b4265a1 100644
--- a/drivers/hwmon/pmbus/q54sj108a2.c
+++ b/drivers/hwmon/pmbus/q54sj108a2.c
@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
dev_err(&client->dev, "Failed to read Manufacturer ID\n");
return ret;
}
- if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+ if (ret != 6 || strncmp(buf, "DELTA", 5)) {
buf[ret] = '\0';
dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
return -ENODEV;
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index b7d4eacdc3ef..e9df0c56d91e 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -18,7 +18,7 @@
#include "pmbus.h"
enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105,
- zl9101, zl9117 };
+ zl8802, zl9101, zl9117, zls1003, zls4009 };
struct zl6100_data {
int id;
@@ -34,6 +34,13 @@ struct zl6100_data {
#define ZL6100_MFR_XTEMP_ENABLE BIT(7)
+#define ZL8802_MFR_USER_GLOBAL_CONFIG 0xe9
+#define ZL8802_MFR_TMON_ENABLE BIT(12)
+#define ZL8802_MFR_USER_CONFIG 0xd1
+#define ZL8802_MFR_XTEMP_ENABLE_2 BIT(1)
+#define ZL8802_MFR_DDC_CONFIG 0xd3
+#define ZL8802_MFR_PHASES_MASK 0x0007
+
#define MFR_VMON_OV_FAULT_LIMIT 0xf5
#define MFR_VMON_UV_FAULT_LIMIT 0xf6
#define MFR_READ_VMON 0xf7
@@ -132,7 +139,7 @@ static int zl6100_read_word_data(struct i2c_client *client, int page,
struct zl6100_data *data = to_zl6100_data(info);
int ret, vreg;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
if (data->id == zl2005) {
@@ -191,7 +198,7 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
struct zl6100_data *data = to_zl6100_data(info);
int ret, status;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
zl6100_wait(data);
@@ -230,7 +237,7 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
struct zl6100_data *data = to_zl6100_data(info);
int ret, vreg;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
switch (reg) {
@@ -271,7 +278,7 @@ static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
struct zl6100_data *data = to_zl6100_data(info);
int ret;
- if (page > 0)
+ if (page >= info->pages)
return -ENXIO;
zl6100_wait(data);
@@ -287,6 +294,10 @@ static const struct i2c_device_id zl6100_id[] = {
{"bmr462", zl2008},
{"bmr463", zl2008},
{"bmr464", zl2008},
+ {"bmr465", zls4009},
+ {"bmr466", zls1003},
+ {"bmr467", zls4009},
+ {"bmr469", zl8802},
{"zl2004", zl2004},
{"zl2005", zl2005},
{"zl2006", zl2006},
@@ -295,15 +306,18 @@ static const struct i2c_device_id zl6100_id[] = {
{"zl2106", zl2106},
{"zl6100", zl6100},
{"zl6105", zl6105},
+ {"zl8802", zl8802},
{"zl9101", zl9101},
{"zl9117", zl9117},
+ {"zls1003", zls1003},
+ {"zls4009", zls4009},
{ }
};
MODULE_DEVICE_TABLE(i2c, zl6100_id);
static int zl6100_probe(struct i2c_client *client)
{
- int ret;
+ int ret, i;
struct zl6100_data *data;
struct pmbus_driver_info *info;
u8 device_id[I2C_SMBUS_BLOCK_MAX + 1];
@@ -367,18 +381,70 @@ static int zl6100_probe(struct i2c_client *client)
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
/*
- * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
- * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
+ * ZL2004, ZL8802, ZL9101M, ZL9117M and ZLS4009 support monitoring
+ * an extra voltage (VMON for ZL2004, ZL8802 and ZLS4009,
+ * VDRV for ZL9101M and ZL9117M). Report it as vmon.
*/
- if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
+ if (data->id == zl2004 || data->id == zl8802 || data->id == zl9101 ||
+ data->id == zl9117 || data->id == zls4009)
info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
- ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
- if (ret < 0)
- return ret;
+ /*
+ * ZL8802 has two outputs that can be used either independently or in
+ * a current sharing configuration. The driver uses the DDC_CONFIG
+ * register to check if the module is running with independent or
+ * shared outputs. If the module is in shared output mode, only one
+ * output voltage will be reported.
+ */
+ if (data->id == zl8802) {
+ info->pages = 2;
+ info->func[0] |= PMBUS_HAVE_IIN;
+
+ ret = i2c_smbus_read_word_data(client, ZL8802_MFR_DDC_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
+
+ if (ret & ZL8802_MFR_PHASES_MASK)
+ info->func[1] |= PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ else
+ info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
- if (ret & ZL6100_MFR_XTEMP_ENABLE)
- info->func[0] |= PMBUS_HAVE_TEMP2;
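+ /*
+ * Check the per-channel USER_CONFIG register on both pages to see
+ * whether external temperature monitoring is enabled for that rail.
+ */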
+ for (i = 0; i < 2; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
+
+ ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ZL8802_MFR_XTEMP_ENABLE_2)
+ info->func[i] |= PMBUS_HAVE_TEMP2;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
+ }
+ ret = i2c_smbus_read_word_data(client, ZL8802_MFR_USER_GLOBAL_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ZL8802_MFR_TMON_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP3;
+ } else {
+ ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ZL6100_MFR_XTEMP_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+ }
data->access = ktime_get();
zl6100_wait(data);
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 4324a5dbc968..8f1b569c69e7 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -64,7 +64,6 @@ static const char * const SCH5627_IN_LABELS[SCH5627_NO_IN] = {
struct sch5627_data {
unsigned short addr;
- struct sch56xx_watchdog_data *watchdog;
u8 control;
u8 temp_max[SCH5627_NO_TEMPS];
u8 temp_crit[SCH5627_NO_TEMPS];
@@ -357,16 +356,6 @@ static const struct hwmon_chip_info sch5627_chip_info = {
.info = sch5627_info,
};
-static int sch5627_remove(struct platform_device *pdev)
-{
- struct sch5627_data *data = platform_get_drvdata(pdev);
-
- if (data->watchdog)
- sch56xx_watchdog_unregister(data->watchdog);
-
- return 0;
-}
-
static int sch5627_probe(struct platform_device *pdev)
{
struct sch5627_data *data;
@@ -460,9 +449,9 @@ static int sch5627_probe(struct platform_device *pdev)
return PTR_ERR(hwmon_dev);
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
- (build_code << 24) | (build_id << 8) | hwmon_rev,
- &data->update_lock, 1);
+ sch56xx_watchdog_register(&pdev->dev, data->addr,
+ (build_code << 24) | (build_id << 8) | hwmon_rev,
+ &data->update_lock, 1);
return 0;
}
@@ -472,7 +461,6 @@ static struct platform_driver sch5627_driver = {
.name = DRVNAME,
},
.probe = sch5627_probe,
- .remove = sch5627_remove,
};
module_platform_driver(sch5627_driver);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 5683a38740f6..a5cd4de36575 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -54,7 +54,6 @@ static const u16 SCH5636_REG_FAN_VAL[SCH5636_NO_FANS] = {
struct sch5636_data {
unsigned short addr;
struct device *hwmon_dev;
- struct sch56xx_watchdog_data *watchdog;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
@@ -372,9 +371,6 @@ static int sch5636_remove(struct platform_device *pdev)
struct sch5636_data *data = platform_get_drvdata(pdev);
int i;
- if (data->watchdog)
- sch56xx_watchdog_unregister(data->watchdog);
-
if (data->hwmon_dev)
hwmon_device_unregister(data->hwmon_dev);
@@ -495,9 +491,8 @@ static int sch5636_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
- (revision[0] << 8) | revision[1],
- &data->update_lock, 0);
+ sch56xx_watchdog_register(&pdev->dev, data->addr, (revision[0] << 8) | revision[1],
+ &data->update_lock, 0);
return 0;
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 6c84780e358e..40cdadad35e5 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -20,8 +20,8 @@
#include "sch56xx-common.h"
/* Insmod parameters */
-static int nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, int, 0);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
@@ -378,8 +378,8 @@ static const struct watchdog_ops watchdog_ops = {
.set_timeout = watchdog_set_timeout,
};
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
- u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
+void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
+ struct mutex *io_lock, int check_enabled)
{
struct sch56xx_watchdog_data *data;
int err, control, output_enable;
@@ -393,23 +393,22 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
mutex_unlock(io_lock);
if (control < 0)
- return NULL;
+ return;
if (output_enable < 0)
- return NULL;
+ return;
if (check_enabled && !(output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
pr_warn("Watchdog not enabled by BIOS, not registering\n");
- return NULL;
+ return;
}
- data = kzalloc(sizeof(struct sch56xx_watchdog_data), GFP_KERNEL);
+ data = devm_kzalloc(parent, sizeof(struct sch56xx_watchdog_data), GFP_KERNEL);
if (!data)
- return NULL;
+ return;
data->addr = addr;
data->io_lock = io_lock;
- strlcpy(data->wdinfo.identity, "sch56xx watchdog",
- sizeof(data->wdinfo.identity));
+ strscpy(data->wdinfo.identity, "sch56xx watchdog", sizeof(data->wdinfo.identity));
data->wdinfo.firmware_version = revision;
data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
if (!nowayout)
@@ -421,8 +420,7 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
data->wddev.timeout = 60;
data->wddev.min_timeout = 1;
data->wddev.max_timeout = 255 * 60;
- if (nowayout)
- set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+ watchdog_set_nowayout(&data->wddev, nowayout);
if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
set_bit(WDOG_ACTIVE, &data->wddev.status);
@@ -438,24 +436,14 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
data->watchdog_output_enable = output_enable;
watchdog_set_drvdata(&data->wddev, data);
- err = watchdog_register_device(&data->wddev);
+ err = devm_watchdog_register_device(parent, &data->wddev);
if (err) {
pr_err("Registering watchdog chardev: %d\n", err);
- kfree(data);
- return NULL;
+ devm_kfree(parent, data);
}
-
- return data;
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
-void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
-{
- watchdog_unregister_device(&data->wddev);
- kfree(data);
-}
-EXPORT_SYMBOL(sch56xx_watchdog_unregister);
-
/*
* platform dev find, add and remove functions
*/
@@ -516,37 +504,18 @@ static int __init sch56xx_device_add(int address, const char *name)
struct resource res = {
.start = address,
.end = address + REGION_LENGTH - 1,
+ .name = name,
.flags = IORESOURCE_IO,
};
int err;
- sch56xx_pdev = platform_device_alloc(name, address);
- if (!sch56xx_pdev)
- return -ENOMEM;
-
- res.name = sch56xx_pdev->name;
err = acpi_check_resource_conflict(&res);
if (err)
- goto exit_device_put;
-
- err = platform_device_add_resources(sch56xx_pdev, &res, 1);
- if (err) {
- pr_err("Device resource addition failed\n");
- goto exit_device_put;
- }
-
- err = platform_device_add(sch56xx_pdev);
- if (err) {
- pr_err("Device addition failed\n");
- goto exit_device_put;
- }
-
- return 0;
+ return err;
-exit_device_put:
- platform_device_put(sch56xx_pdev);
+ sch56xx_pdev = platform_device_register_simple(name, -1, &res, 1);
- return err;
+ return PTR_ERR_OR_ZERO(sch56xx_pdev);
}
static int __init sch56xx_init(void)
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 75eb73617cf2..e907d9da0dd5 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -14,6 +14,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
int high_nibble);
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
- u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
+void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
+ struct mutex *io_lock, int check_enabled);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 25aac40f2764..919877970ae3 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
scpi_scale_reading(&value, sensor);
+ /*
+ * Temperature sensor values are treated as signed values based on
+ * observation even though that is not explicitly specified, and
+ * because an unsigned u64 temperature does not make practical sense,
+ * especially when the temperature is below zero degrees Celsius.
+ */
+ if (sensor->info.class == TEMPERATURE)
+ return sprintf(buf, "%lld\n", (s64)value);
+
return sprintf(buf, "%llu\n", value);
}
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
new file mode 100644
index 000000000000..09c2a0b06444
--- /dev/null
+++ b/drivers/hwmon/sht4x.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (c) Linumiz 2021
+ *
+ * sht4x.c - Linux hwmon driver for SHT4x Temperature and Humidity sensor
+ *
+ * Author: Navin Sankar Velliangiri <navin@linumiz.com>
+ */
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * Poll intervals (in milliseconds)
+ */
+#define SHT4X_MIN_POLL_INTERVAL 2000
+
+/*
+ * I2C command delays (in microseconds)
+ */
+#define SHT4X_MEAS_DELAY 1000
+#define SHT4X_DELAY_EXTRA 10000
+
+/*
+ * Command Bytes
+ */
+#define SHT4X_CMD_MEASURE_HPM 0b11111101
+#define SHT4X_CMD_RESET 0b10010100
+
+#define SHT4X_CMD_LEN 1
+#define SHT4X_CRC8_LEN 1
+#define SHT4X_WORD_LEN 2
+#define SHT4X_RESPONSE_LENGTH 6
+#define SHT4X_CRC8_POLYNOMIAL 0x31
+#define SHT4X_CRC8_INIT 0xff
+#define SHT4X_MIN_TEMPERATURE -45000
+#define SHT4X_MAX_TEMPERATURE 125000
+#define SHT4X_MIN_HUMIDITY 0
+#define SHT4X_MAX_HUMIDITY 100000
+
+DECLARE_CRC8_TABLE(sht4x_crc8_table);
+
+/**
+ * struct sht4x_data - All the data required to operate an SHT4X chip
+ * @client: the i2c client associated with the SHT4X
+ * @lock: a mutex that is used to prevent parallel access to the i2c client
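+ * @valid: true if the cached temperature and humidity values are valid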
+ * @update_interval: the minimum poll interval
+ * @last_updated: the previous time that the SHT4X was polled
+ * @temperature: the latest temperature value received from the SHT4X
+ * @humidity: the latest humidity value received from the SHT4X
+ */
+struct sht4x_data {
+ struct i2c_client *client;
+ struct mutex lock; /* atomic read data updates */
+ bool valid; /* validity of fields below */
+ long update_interval; /* in milli-seconds */
+ long last_updated; /* in jiffies */
+ s32 temperature;
+ s32 humidity;
+};
+
+/**
+ * sht4x_read_values() - read and parse the raw data from the SHT4X
+ * @data: the sht4x_data structure whose client and lock are used
+ * Return: 0 if successful, -ERRNO if not
+ */
+static int sht4x_read_values(struct sht4x_data *data)
+{
+ int ret = 0;
+ u16 t_ticks, rh_ticks;
+ unsigned long next_update;
+ struct i2c_client *client = data->client;
+ u8 crc;
+ u8 cmd[SHT4X_CMD_LEN] = {SHT4X_CMD_MEASURE_HPM};
+ u8 raw_data[SHT4X_RESPONSE_LENGTH];
+
+ mutex_lock(&data->lock);
+ next_update = data->last_updated +
+ msecs_to_jiffies(data->update_interval);
+
+ if (data->valid && time_before_eq(jiffies, next_update))
+ goto unlock;
+
+ ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
+ if (ret < 0)
+ goto unlock;
+
+ usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+
+ ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
+ if (ret != SHT4X_RESPONSE_LENGTH) {
+ if (ret >= 0)
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+ t_ticks = raw_data[0] << 8 | raw_data[1];
+ rh_ticks = raw_data[3] << 8 | raw_data[4];
+
+ crc = crc8(sht4x_crc8_table, &raw_data[0], SHT4X_WORD_LEN, CRC8_INIT_VALUE);
+ if (crc != raw_data[2]) {
+ dev_err(&client->dev, "data integrity check failed\n");
+ ret = -EIO;
+ goto unlock;
+ }
+
+ crc = crc8(sht4x_crc8_table, &raw_data[3], SHT4X_WORD_LEN, CRC8_INIT_VALUE);
+ if (crc != raw_data[5]) {
+ dev_err(&client->dev, "data integrity check failed\n");
+ ret = -EIO;
+ goto unlock;
+ }
+
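+ /*
+ * Convert raw ticks to milli-units. Per the SHT4x datasheet,
+ * T[degC] = -45 + 175 * t_ticks / 65535 and RH[%] = -6 + 125 *
+ * rh_ticks / 65535; the expressions below are the same formulas
+ * scaled by 1000 and approximated with a power-of-two divisor.
+ */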
+ data->temperature = ((21875 * (int32_t)t_ticks) >> 13) - 45000;
+ data->humidity = ((15625 * (int32_t)rh_ticks) >> 13) - 6000;
+ data->last_updated = jiffies;
+ data->valid = true;
+ ret = 0;
+
+unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int sht4x_interval_write(struct sht4x_data *data, long val)
+{
+ data->update_interval = clamp_val(val, SHT4X_MIN_POLL_INTERVAL, UINT_MAX);
+
+ return 0;
+}
+
+/* sht4x_interval_read() - read the minimum poll interval in milliseconds */
+static int sht4x_interval_read(struct sht4x_data *data, long *val)
+{
+ *val = data->update_interval;
+ return 0;
+}
+
+/* sht4x_temperature1_read() - read the temperature in millidegrees */
+static int sht4x_temperature1_read(struct sht4x_data *data, long *val)
+{
+ int ret;
+
+ ret = sht4x_read_values(data);
+ if (ret < 0)
+ return ret;
+
+ *val = data->temperature;
+
+ return 0;
+}
+
+/* sht4x_humidity1_read() - read a relative humidity in millipercent */
+static int sht4x_humidity1_read(struct sht4x_data *data, long *val)
+{
+ int ret;
+
+ ret = sht4x_read_values(data);
+ if (ret < 0)
+ return ret;
+
+ *val = data->humidity;
+
+ return 0;
+}
+
+static umode_t sht4x_hwmon_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ case hwmon_humidity:
+ return 0444;
+ case hwmon_chip:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int sht4x_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ return sht4x_temperature1_read(data, val);
+ case hwmon_humidity:
+ return sht4x_humidity1_read(data, val);
+ case hwmon_chip:
+ return sht4x_interval_read(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sht4x_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_chip:
+ return sht4x_interval_write(data, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *sht4x_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL,
+};
+
+static const struct hwmon_ops sht4x_hwmon_ops = {
+ .is_visible = sht4x_hwmon_visible,
+ .read = sht4x_hwmon_read,
+ .write = sht4x_hwmon_write,
+};
+
+static const struct hwmon_chip_info sht4x_chip_info = {
+ .ops = &sht4x_hwmon_ops,
+ .info = sht4x_info,
+};
+
+static int sht4x_probe(struct i2c_client *client,
+ const struct i2c_device_id *sht4x_id)
+{
+ struct device *device = &client->dev;
+ struct device *hwmon_dev;
+ struct sht4x_data *data;
+ u8 cmd[] = {SHT4X_CMD_RESET};
+ int ret;
+
+ /*
+ * We require full I2C support since the SHT4x uses multi-byte reads and
+ * writes as well as multi-byte commands, which are not supported by
+ * the SMBus protocol.
+ */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ data = devm_kzalloc(device, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->update_interval = SHT4X_MIN_POLL_INTERVAL;
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ crc8_populate_msb(sht4x_crc8_table, SHT4X_CRC8_POLYNOMIAL);
+
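+ /* Soft-reset the sensor so it starts from a known state. */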
+ ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
+ if (ret < 0)
+ return ret;
+ if (ret != SHT4X_CMD_LEN)
+ return -EIO;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(device,
+ client->name,
+ data,
+ &sht4x_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id sht4x_id[] = {
+ { "sht4x", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sht4x_id);
+
+static struct i2c_driver sht4x_driver = {
+ .driver = {
+ .name = "sht4x",
+ },
+ .probe = sht4x_probe,
+ .id_table = sht4x_id,
+};
+
+module_i2c_driver(sht4x_driver);
+
+MODULE_AUTHOR("Navin Sankar Velliangiri <navin@linumiz.com>");
+MODULE_DESCRIPTION("Sensirion SHT4x humidity and temperature sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index c2484f15298b..8bd6435c13e8 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -99,11 +99,14 @@
#define POWER_ENABLE 0x19
#define TPS23861_NUM_PORTS 4
+#define TPS23861_GENERAL_MASK_1 0x17
+#define TPS23861_CURRENT_SHUNT_MASK BIT(0)
+
#define TEMPERATURE_LSB 652 /* 0.652 degrees Celsius */
#define VOLTAGE_LSB 3662 /* 3.662 mV */
#define SHUNT_RESISTOR_DEFAULT 255000 /* 255 mOhm */
-#define CURRENT_LSB_255 62260 /* 62.260 uA */
-#define CURRENT_LSB_250 61039 /* 61.039 uA */
+#define CURRENT_LSB_250 62260 /* 62.260 uA */
+#define CURRENT_LSB_255 61039 /* 61.039 uA */
#define RESISTANCE_LSB 110966 /* 11.0966 Ohm*/
#define RESISTANCE_LSB_LOW 157216 /* 15.7216 Ohm*/
@@ -117,6 +120,7 @@ struct tps23861_data {
static struct regmap_config tps23861_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x6f,
};
static int tps23861_read_temp(struct tps23861_data *data, long *val)
@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
else
data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
+ if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
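+ /*
+ * Program the current-shunt selection bit to match the configured
+ * shunt resistor: clear it for the default 255 mOhm value, set it
+ * otherwise.
+ */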
+ regmap_clear_bits(data->regmap,
+ TPS23861_GENERAL_MASK_1,
+ TPS23861_CURRENT_SHUNT_MASK);
+ else
+ regmap_set_bits(data->regmap,
+ TPS23861_GENERAL_MASK_1,
+ TPS23861_CURRENT_SHUNT_MASK);
+
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &tps23861_chip_info,
NULL);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 281a65d9b44b..10acece9d7b9 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -647,7 +647,7 @@ config I2C_HIGHLANDER
config I2C_HISI
tristate "HiSilicon I2C controller"
- depends on ARM64 || COMPILE_TEST
+ depends on (ARM64 && ACPI) || COMPILE_TEST
help
Say Y here if you want to have Hisilicon I2C controller support
available on the Kunpeng Server.
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4d12e3da12f0..55a9e93fbfeb 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge
*
* Copyright (C) 2004 Patrick Mochel
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 7d62cbda6e06..354cf7e45c4a 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -55,7 +55,7 @@
#define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250))
/**
- * altr_i2c_dev - I2C device context
+ * struct altr_i2c_dev - I2C device context
* @base: pointer to register struct
* @msg: pointer to current message
* @msg_len: number of bytes transferred in msg
@@ -172,7 +172,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
}
-/**
+/*
* altr_i2c_transfer - On the last byte to be transmitted, send
* a Stop bit on the last byte.
*/
@@ -185,7 +185,7 @@ static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data)
writel(data, idev->base + ALTR_I2C_TFR_CMD);
}
-/**
+/*
* altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of
* transfer. Send a Stop bit on the last byte.
*/
@@ -201,9 +201,8 @@ static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev)
}
}
-/**
+/*
* altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
- * @return: Number of bytes left to transfer.
*/
static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev)
{
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index c1bbc4caeb5c..66aafa7d1123 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -144,7 +144,7 @@ enum cdns_i2c_mode {
};
/**
- * enum cdns_i2c_slave_mode - Slave state when I2C is operating in slave mode
+ * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode
*
* @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
* @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
diff --git a/drivers/i2c/busses/i2c-cp2615.c b/drivers/i2c/busses/i2c-cp2615.c
index 78cfecd1ea76..3ded28632e4c 100644
--- a/drivers/i2c/busses/i2c-cp2615.c
+++ b/drivers/i2c/busses/i2c-cp2615.c
@@ -138,17 +138,23 @@ cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w)
static int
cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf)
{
- struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
- struct cp2615_i2c_transfer_result *i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
struct usb_device *usbdev = interface_to_usbdev(usbif);
- int res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
- msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+ struct cp2615_iop_msg *msg;
+ struct cp2615_i2c_transfer_result *i2c_r;
+ int res;
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg,
+ sizeof(struct cp2615_iop_msg), NULL, 0);
if (res < 0) {
kfree(msg);
return res;
}
+ i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) {
kfree(msg);
return -EIO;
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 13be1d678c39..9b08bb5df38d 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -165,7 +165,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
}
/**
- * i2c_dw_init() - Initialize the designware I2C master hardware
+ * i2c_dw_init_master() - Initialize the designware I2C master hardware
* @dev: device private data
*
* This functions configures and enables the I2C master.
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 843b31a0f752..321b2770feab 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -148,7 +148,7 @@ struct i2c_algo_pch_data {
/**
* struct adapter_info - This structure holds the adapter information for the
- PCH i2c controller
+ * PCH i2c controller
* @pch_data: stores a list of i2c_algo_pch_data
* @pch_i2c_suspended: specifies whether the system is suspended or not
* perhaps with more lines and words.
@@ -358,6 +358,7 @@ static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
/**
* pch_i2c_writebytes() - write data to I2C bus in normal mode
* @i2c_adap: Pointer to the struct i2c_adapter.
+ * @msgs: Pointer to the i2c message structure.
* @last: specifies whether last message or not.
* In the case of compound mode it will be 1 for last message,
* otherwise 0.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 99d446763530..04a1e38f2a6f 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -395,11 +395,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
- SMBHSTCNT(priv));
+ outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
- SMBHSTCNT(priv));
+ outb_p(0, SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));
@@ -980,6 +978,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
}
out:
+ /* Unlock the SMBus device for use by BIOS/ACPI */
+ outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv));
+
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
mutex_unlock(&priv->acpi_lock);
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index c8c422e9dda4..5dae7cab7260 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -123,7 +123,6 @@ static int icy_probe(struct zorro_dev *z,
{
struct icy_i2c *i2c;
struct i2c_algo_pcf_data *algo_data;
- struct fwnode_handle *new_fwnode;
struct i2c_board_info ltc2990_info = {
.type = "ltc2990",
.swnode = &icy_ltc2990_node,
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 30d9e89a3db2..dcca9c2396db 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/fsl_devices.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -45,6 +46,7 @@
#define CCR_MTX 0x10
#define CCR_TXAK 0x08
#define CCR_RSTA 0x04
+#define CCR_RSVD 0x02
#define CSR_MCF 0x80
#define CSR_MAAS 0x40
@@ -97,7 +99,7 @@ struct mpc_i2c {
u32 block;
int rc;
int expect_rxack;
-
+ bool has_errata_A004447;
};
struct mpc_i2c_divider {
@@ -136,6 +138,75 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
}
}
+static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
+{
+ void __iomem *addr = i2c->base + MPC_I2C_SR;
+ u8 val;
+
+ return readb_poll_timeout(addr, val, val & mask, 0, 100);
+}
+
+/*
+ * Workaround for Erratum A004447. From the P2040CE Rev Q
+ *
+ * 1. Set up the frequency divider and sampling rate.
+ * 2. I2CCR - a0h
+ * 3. Poll for I2CSR[MBB] to get set.
+ * 4. If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to
+ * step 5. If MAL is not set, then go to step 13.
+ * 5. I2CCR - 00h
+ * 6. I2CCR - 22h
+ * 7. I2CCR - a2h
+ * 8. Poll for I2CSR[MBB] to get set.
+ * 9. Issue read to I2CDR.
+ * 10. Poll for I2CSR[MIF] to be set.
+ * 11. I2CCR - 82h
+ * 12. Workaround complete. Skip the next steps.
+ * 13. Issue read to I2CDR.
+ * 14. Poll for I2CSR[MIF] to be set.
+ * 15. I2CCR - 80h
+ */
+static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c)
+{
+ int ret;
+ u32 val;
+
+ writeccr(i2c, CCR_MEN | CCR_MSTA);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+ return;
+ }
+
+ val = readb(i2c->base + MPC_I2C_SR);
+
+ if (val & CSR_MAL) {
+ writeccr(i2c, 0x00);
+ writeccr(i2c, CCR_MSTA | CCR_RSVD);
+ writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+ return;
+ }
+ val = readb(i2c->base + MPC_I2C_DR);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+ return;
+ }
+ writeccr(i2c, CCR_MEN | CCR_RSVD);
+ } else {
+ val = readb(i2c->base + MPC_I2C_DR);
+ ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+ if (ret) {
+ dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+ return;
+ }
+ writeccr(i2c, CCR_MEN);
+ }
+}
+
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
@@ -670,7 +741,10 @@ static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
{
struct mpc_i2c *i2c = i2c_get_adapdata(adap);
- mpc_i2c_fixup(i2c);
+ if (i2c->has_errata_A004447)
+ mpc_i2c_fixup_A004447(i2c);
+ else
+ mpc_i2c_fixup(i2c);
return 0;
}
@@ -767,6 +841,9 @@ static int fsl_i2c_probe(struct platform_device *op)
}
dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
+ if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447"))
+ i2c->has_errata_A004447 = true;
+
i2c->adap = mpc_ops;
scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
"MPC adapter (%s)", of_node_full_name(op->dev.of_node));
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 5ddfa4e56ee2..4e9fb6b44436 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -479,6 +479,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
+ u16 intr_stat_reg;
+
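+ /*
+ * Clear stale channel and interrupt status before re-initialising
+ * the controller.
+ */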
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
+ intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+ mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT);
if (i2c->dev_comp->apdma_sync) {
writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index dc77e1c4e80f..a2d12a5b1c34 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -159,7 +159,7 @@ struct i2c_nmk_client {
* @clk_freq: clock frequency for the operation mode
* @tft: Tx FIFO Threshold in bytes
* @rft: Rx FIFO Threshold in bytes
- * @timeout Slave response timeout (ms)
+ * @timeout: Slave response timeout (ms)
* @sm: speed mode
* @stop: stop condition.
* @xfer_complete: acknowledge completion for a I2C message.
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 273222e38056..a0af027db04c 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -250,7 +250,7 @@ static irqreturn_t ocores_isr(int irq, void *dev_id)
}
/**
- * Process timeout event
+ * ocores_process_timeout() - Process timeout event
* @i2c: ocores I2C device instance
*/
static void ocores_process_timeout(struct ocores_i2c *i2c)
@@ -264,7 +264,7 @@ static void ocores_process_timeout(struct ocores_i2c *i2c)
}
/**
- * Wait until something change in a given register
+ * ocores_wait() - Wait until something changes in a given register
* @i2c: ocores I2C device instance
* @reg: register to query
* @mask: bitmask to apply on register value
@@ -296,7 +296,7 @@ static int ocores_wait(struct ocores_i2c *i2c,
}
/**
- * Wait until is possible to process some data
+ * ocores_poll_wait() - Wait until it is possible to process some data
* @i2c: ocores I2C device instance
*
* Used when the device is in polling mode (interrupts disabled).
@@ -334,7 +334,7 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
}
/**
- * It handles an IRQ-less transfer
+ * ocores_process_polling() - Handle an IRQ-less transfer
* @i2c: ocores I2C device instance
*
* Even if IRQ are disabled, the I2C OpenCore IP behavior is exactly the same
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 8c4ec7f13f5a..50f21cdbe90d 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -138,7 +138,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
/**
* i2c_pnx_start - start a device
* @slave_addr: slave address
- * @adap: pointer to adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Generate a START signal in the desired mode.
*/
@@ -194,7 +194,7 @@ static int i2c_pnx_start(unsigned char slave_addr,
/**
* i2c_pnx_stop - stop a device
- * @adap: pointer to I2C adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Generate a STOP signal to terminate the master transaction.
*/
@@ -223,7 +223,7 @@ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data)
/**
* i2c_pnx_master_xmit - transmit data to slave
- * @adap: pointer to I2C adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Sends one byte of data to the slave
*/
@@ -293,7 +293,7 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
/**
* i2c_pnx_master_rcv - receive data from slave
- * @adap: pointer to I2C adapter structure
+ * @alg_data: pointer to local driver data structure
*
* Reads one byte data from the slave
*/
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 214b4c913a13..6d635a7c104c 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -100,7 +100,7 @@ static const struct geni_i2c_err_log gi2c_log[] = {
[GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
[NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
[GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
- [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unepxected start/stop"},
+ [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
[ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
[GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
[GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
return 0;
}
+static void geni_i2c_shutdown(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+ /* Make client i2c transfers start failing */
+ i2c_mark_adapter_suspended(&gi2c->adap);
+}
+
static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
{
int ret;
@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+ i2c_mark_adapter_suspended(&gi2c->adap);
+
if (!gi2c->suspended) {
geni_i2c_runtime_suspend(dev);
pm_runtime_disable(dev);
@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
return 0;
}
+static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
+{
+ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+ i2c_mark_adapter_resumed(&gi2c->adap);
+ return 0;
+}
+
static const struct dev_pm_ops geni_i2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
NULL)
};
@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
static struct platform_driver geni_i2c_driver = {
.probe = geni_i2c_probe,
.remove = geni_i2c_remove,
+ .shutdown = geni_i2c_shutdown,
.driver = {
.name = "geni_i2c",
.pm = &geni_i2c_pm_ops,
diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
index a39f7d092797..66dfa211e736 100644
--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
+++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
}
- ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+ ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
if (ret) {
dev_err(&adapter->dev, "failure sending STOP\n");
return -EREMOTEIO;
@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interface *interface,
* Set bus frequency. The frequency is:
* 120,000,000 / ( 16 + 2 * div * 4^prescale).
* Using dev = 52, prescale = 0 give 100KHz */
- ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
NULL, 0);
if (ret) {
dev_err(&interface->dev, "failure sending bit rate");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ab928613afba..4d82761e1585 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -480,7 +480,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* forces us to send a new START
* when we change direction
*/
+ dev_dbg(i2c->dev,
+ "missing START before write->read\n");
s3c24xx_i2c_stop(i2c, -EINVAL);
+ break;
}
goto retry_write;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 3ae6ca21a02c..2d2e630fd438 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
- { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index faa81a95551f..88482316d22a 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -524,7 +524,7 @@ static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev)
}
/**
- * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read
+ * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read
* @i2c_dev: Controller's private data
*/
static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
@@ -558,7 +558,7 @@ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
}
/**
- * st_i2c_isr() - Interrupt routine
+ * st_i2c_isr_thread() - Interrupt routine
* @irq: interrupt number
* @data: Controller's private data
*/
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4933fc8ce3fd..eebce7ecef25 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -313,7 +313,7 @@ static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev)
}
/**
- * stm32f4_i2c_write_ byte() - Write a byte in the data register
+ * stm32f4_i2c_write_byte() - Write a byte in the data register
* @i2c_dev: Controller's private data
* @byte: Data to write in the register
*/
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index 3680d608698b..ec0c7cad4240 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -65,7 +65,7 @@ static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
*out |= SERIALI2C_RECV_LEN;
}
-/**
+/*
* The serialized I2C format is simply the following:
* [addr little-endian][flags little-endian][len little-endian][data if write]
* [addr little-endian][flags little-endian][len little-endian][data if write]
@@ -109,7 +109,7 @@ static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
request->xfer.data_size = pos;
}
-/**
+/*
* The data in the BPMP -> CPU direction is composed of sequential blocks for
* those messages that have I2C_M_RD. So, for example, if you have:
*
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6ef38a8ee95c..cb64fe649390 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -526,7 +526,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
return put_user(funcs, (compat_ulong_t __user *)arg);
case I2C_RDWR: {
struct i2c_rdwr_ioctl_data32 rdwr_arg;
- struct i2c_msg32 *p;
+ struct i2c_msg32 __user *p;
struct i2c_msg *rdwr_pa;
int i;
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 6dc88902c189..1c78657631f4 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -34,7 +34,7 @@ struct i2c_arbitrator_data {
};

-/**
+/*
* i2c_arbitrator_select - claim the I2C bus
*
* Use the GPIO-based signalling protocol; return -EBUSY if we fail.
@@ -77,7 +77,7 @@ static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan)
return -EBUSY;
}

-/**
+/*
* i2c_arbitrator_deselect - release the I2C bus
*
* Release the I2C bus using the GPIO-based signalling protocol.
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
deleted file mode 100644
index 19abf11c84c8..000000000000
--- a/drivers/ide/Kconfig
+++ /dev/null
@@ -1,849 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IDE ATA ATAPI Block device driver configuration
-#
-
-# Select HAVE_IDE if IDE is supported
-config HAVE_IDE
- bool
-
-menuconfig IDE
- tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
- depends on HAVE_IDE
- depends on BLOCK
- select BLK_SCSI_REQUEST
- help
- If you say Y here, your kernel will be able to manage ATA/(E)IDE and
- ATAPI units. The most common cases are IDE hard drives and ATAPI
- CD-ROM drives.
-
- This subsystem is currently in maintenance mode with only bug fix
- changes applied. Users of ATA hardware are encouraged to migrate to
- the newer ATA subsystem ("Serial ATA (prod) and Parallel ATA
- (experimental) drivers") which is more actively maintained.
-
- To compile this driver as a module, choose M here: the
- module will be called ide-core.
-
- For further information, please read <file:Documentation/ide/ide.rst>.
-
- If unsure, say N.
-
-if IDE
-
-comment "Please see Documentation/ide/ide.rst for help/info on IDE drives"
-
-config IDE_XFER_MODE
- bool
-
-config IDE_TIMINGS
- bool
- select IDE_XFER_MODE
-
-config IDE_ATAPI
- bool
-
-config IDE_LEGACY
- bool
-
-config BLK_DEV_IDE_SATA
- bool "Support for SATA (deprecated; conflicts with libata SATA driver)"
- default n
- help
- There are two drivers for Serial ATA controllers.
-
- The main driver, "libata", uses the SCSI subsystem
- and supports most modern SATA controllers. In order to use it
- you may take a look at "Serial ATA (prod) and Parallel ATA
- (experimental) drivers".
-
- The IDE driver (which you are currently configuring) supports
- a few first-generation SATA controllers.
-
- In order to eliminate conflicts between the two subsystems,
- this config option enables the IDE driver's SATA support.
- Normally this is disabled, as it is preferred that libata
- supports SATA controllers, and this (IDE) driver supports
- PATA controllers.
-
- If unsure, say N.
-
-config IDE_GD
- tristate "generic ATA/ATAPI disk support"
- default y
- help
- Support for ATA/ATAPI disks (including ATAPI floppy drives).
-
- To compile this driver as a module, choose M here.
- The module will be called ide-gd_mod.
-
- If unsure, say Y.
-
-config IDE_GD_ATA
- bool "ATA disk support"
- depends on IDE_GD
- default y
- help
- This will include support for ATA hard disks.
-
- If unsure, say Y.
-
-config IDE_GD_ATAPI
- bool "ATAPI floppy support"
- depends on IDE_GD
- select IDE_ATAPI
- help
- This will include support for ATAPI floppy drives
- (i.e. Iomega ZIP or MKE LS-120).
-
- For information about jumper settings and the question
- of when a ZIP drive uses a partition table, see
- <http://www.win.tue.nl/~aeb/linux/zip/zip-1.html>.
-
- If unsure, say N.
-
-config BLK_DEV_IDECS
- tristate "PCMCIA IDE support"
- depends on PCMCIA
- help
- Support for Compact Flash cards, outboard IDE disks, tape drives,
- and CD-ROM drives connected through a PCMCIA card.
-
-config BLK_DEV_DELKIN
- tristate "Cardbus IDE support (Delkin/ASKA/Workbit)"
- depends on CARDBUS && PCI
- help
- Support for Delkin, ASKA, and Workbit Cardbus CompactFlash
- Adapters. This may also work for similar SD and XD adapters.
-
-config BLK_DEV_IDECD
- tristate "Include IDE/ATAPI CDROM support"
- depends on BLK_DEV
- select IDE_ATAPI
- select CDROM
- help
- If you have a CD-ROM drive using the ATAPI protocol, say Y. ATAPI is
- a newer protocol used by IDE CD-ROM and TAPE drives, similar to the
- SCSI protocol. Most new CD-ROM drives use ATAPI, including the
- NEC-260, Mitsumi FX400, Sony 55E, and just about all non-SCSI
- double(2X) or better speed drives.
-
- If you say Y here, the CD-ROM drive will be identified at boot time
- along with other IDE devices, as "hdb" or "hdc", or something
- similar (check the boot messages with dmesg). If this is your only
- CD-ROM drive, you can say N to all other CD-ROM options, but be sure
- to say Y or M to "ISO 9660 CD-ROM file system support".
-
- To compile this driver as a module, choose M here: the
- module will be called ide-cd.
-
-config BLK_DEV_IDECD_VERBOSE_ERRORS
- bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
- depends on BLK_DEV_IDECD
- default y
- help
- Turn this on to have the driver print out the meanings of the
- ATAPI error codes. This will use up additional 8kB of kernel-space
- memory, though.
-
-config BLK_DEV_IDETAPE
- tristate "Include IDE/ATAPI TAPE support"
- select IDE_ATAPI
- help
- If you have an IDE tape drive using the ATAPI protocol, say Y.
- ATAPI is a newer protocol used by IDE tape and CD-ROM drives,
- similar to the SCSI protocol. If you have an SCSI tape drive
- however, you can say N here.
-
- You should also say Y if you have an OnStream DI-30 tape drive; this
- will not work with the SCSI protocol, until there is support for the
- SC-30 and SC-50 versions.
-
- If you say Y here, the tape drive will be identified at boot time
- along with other IDE devices, as "hdb" or "hdc", or something
- similar, and will be mapped to a character device such as "ht0"
- (check the boot messages with dmesg). Be sure to consult the
- <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.rst>
- files for usage information.
-
- To compile this driver as a module, choose M here: the
- module will be called ide-tape.
-
-config BLK_DEV_IDEACPI
- bool "IDE ACPI support"
- depends on ACPI
- help
- Implement ACPI support for generic IDE devices. On modern
- machines ACPI support is required to properly handle ACPI S3 states.
-
-config IDE_TASK_IOCTL
- bool "IDE Taskfile Access"
- help
- This is a direct raw access to the media. It is a complex but
- elegant solution to test and validate the domain of the hardware and
- perform below the driver data recovery if needed. This is the most
- basic form of media-forensics.
-
- If you are unsure, say N here.
-
-config IDE_PROC_FS
- bool "legacy /proc/ide/ support"
- depends on IDE && PROC_FS
- default y
- help
- This option enables support for the various files in
- /proc/ide. In Linux 2.6 this has been superseded by
- files in sysfs but many legacy applications rely on this.
-
- If unsure say Y.
-
-comment "IDE chipset support/bugfixes"
-
-config IDE_GENERIC
- tristate "generic/default IDE chipset support"
- depends on ALPHA || X86 || IA64 || MIPS || ARCH_RPC
- default ARM && ARCH_RPC
- help
- This is the generic IDE driver. This driver attaches to the
- fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
- so on). Please note that if this driver is built into the
- kernel or loaded before other ATA (IDE or libata) drivers
- and the controller is located at legacy ports, this driver
- may grab those ports and thus can prevent the controller
- specific driver from attaching.
-
- Also, currently, IDE generic doesn't allow IRQ sharing
- meaning that the IRQs it grabs won't be available to other
- controllers sharing those IRQs which usually makes drivers
- for those controllers fail. Generally, it's not a good idea
- to load IDE generic driver on modern systems.
-
- If unsure, say N.
-
-config BLK_DEV_PLATFORM
- tristate "Platform driver for IDE interfaces"
- help
- This is the platform IDE driver, used mostly for Memory Mapped
- IDE devices, like Compact Flashes running in True IDE mode.
-
- If unsure, say N.
-
-config BLK_DEV_CMD640
- tristate "CMD640 chipset bugfix/support"
- depends on X86
- select IDE_TIMINGS
- help
- The CMD-Technologies CMD640 IDE chip is used on many common 486 and
- Pentium motherboards, usually in combination with a "Neptune" or
- "SiS" chipset. Unfortunately, it has a number of rather nasty
- design flaws that can cause severe data corruption under many common
- conditions. Say Y here to include code which tries to automatically
- detect and correct the problems under Linux. This option also
- enables access to the secondary IDE ports in some CMD640 based
- systems.
-
- This driver will work automatically in PCI based systems (most new
- systems have PCI slots). But if your system uses VESA local bus
- (VLB) instead of PCI, you must also supply a kernel boot parameter
- to enable the CMD640 bugfix/support: "cmd640.probe_vlb". (Try "man
- bootparam" or see the documentation of your boot loader about how to
- pass options to the kernel.)
-
- The CMD640 chip is also used on add-in cards by Acculogic, and on
- the "CSA-6400E PCI to IDE controller" that some people have. For
- details, read <file:Documentation/ide/ide.rst>.
-
-config BLK_DEV_CMD640_ENHANCED
- bool "CMD640 enhanced support"
- depends on BLK_DEV_CMD640
- help
- This option includes support for setting/autotuning PIO modes and
- prefetch on CMD640 IDE interfaces. For details, read
- <file:Documentation/ide/ide.rst>. If you have a CMD640 IDE interface
- and your BIOS does not already do this for you, then say Y here.
- Otherwise say N.
-
-config BLK_DEV_IDEPNP
- tristate "PNP EIDE support"
- depends on PNP
- help
- If you have a PnP (Plug and Play) compatible EIDE card and
- would like the kernel to automatically detect and activate
- it, say Y here.
-
-config BLK_DEV_IDEDMA_SFF
- bool
-
-if PCI
-
-comment "PCI IDE chipsets support"
-
-config BLK_DEV_IDEPCI
- bool
-
-config IDEPCI_PCIBUS_ORDER
- bool "Probe IDE PCI devices in the PCI bus order (DEPRECATED)"
- depends on IDE=y && BLK_DEV_IDEPCI
- default y
- help
- Probe IDE PCI devices in the order in which they appear on the
- PCI bus (i.e. 00:1f.1 PCI device before 02:01.0 PCI device)
- instead of the order in which IDE PCI host drivers are loaded.
-
- Please note that this method of assuring stable naming of
- IDE devices is unreliable and use other means for achieving
- it (i.e. udev).
-
- If in doubt, say N.
-
-# TODO: split it on per host driver config options (or module parameters)
-config BLK_DEV_OFFBOARD
- bool "Boot off-board chipsets first support (DEPRECATED)"
- depends on BLK_DEV_IDEPCI && (BLK_DEV_AEC62XX || BLK_DEV_GENERIC || BLK_DEV_HPT366 || BLK_DEV_PDC202XX_NEW || BLK_DEV_PDC202XX_OLD || BLK_DEV_TC86C001)
- help
- Normally, IDE controllers built into the motherboard (on-board
- controllers) are assigned to ide0 and ide1 while those on add-in PCI
- cards (off-board controllers) are relegated to ide2 and ide3.
- Answering Y here will allow you to reverse the situation, with
- off-board controllers on ide0/1 and on-board controllers on ide2/3.
- This can improve the usability of some boot managers such as lilo
- when booting from a drive on an off-board controller.
-
- Note that, if you do this, the order of the hd* devices will be
- rearranged which may require modification of fstab and other files.
-
- Please also note that this method of assuring stable naming of
- IDE devices is unreliable and use other means for achieving it
- (i.e. udev).
-
- If in doubt, say N.
-
-config BLK_DEV_GENERIC
- tristate "Generic PCI IDE Chipset Support"
- select BLK_DEV_IDEPCI
- help
- This option provides generic support for various PCI IDE Chipsets
- which otherwise might not be supported.
-
-config BLK_DEV_OPTI621
- tristate "OPTi 82C621 chipset enhanced support"
- select BLK_DEV_IDEPCI
- help
- This is a driver for the OPTi 82C621 EIDE controller.
- Please read the comments at the top of <file:drivers/ide/opti621.c>.
-
-config BLK_DEV_RZ1000
- tristate "RZ1000 chipset bugfix/support"
- depends on X86
- select BLK_DEV_IDEPCI
- help
- The PC-Technologies RZ1000 IDE chip is used on many common 486 and
- Pentium motherboards, usually along with the "Neptune" chipset.
- Unfortunately, it has a rather nasty design flaw that can cause
- severe data corruption under many conditions. Say Y here to include
- code which automatically detects and corrects the problem under
- Linux. This may slow disk throughput by a few percent, but at least
- things will operate 100% reliably.
-
-config BLK_DEV_IDEDMA_PCI
- bool
- select BLK_DEV_IDEPCI
- select BLK_DEV_IDEDMA_SFF
-
-config BLK_DEV_AEC62XX
- tristate "AEC62XX chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for Acard AEC62xx (Artop ATP8xx)
- IDE controllers. This allows the kernel to change PIO, DMA and UDMA
- speeds and to configure the chip to optimum performance.
-
-config BLK_DEV_ALI15X3
- tristate "ALI M15x3 chipset support"
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver ensures (U)DMA support for ALI 1533, 1543 and 1543C
- onboard chipsets. It also tests for Simplex mode and enables
- normal dual channel support.
-
- Please read the comments at the top of
- <file:drivers/ide/alim15x3.c>.
-
- If unsure, say N.
-
-config BLK_DEV_AMD74XX
- tristate "AMD and nVidia IDE support"
- depends on !ARM
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for AMD-7xx and AMD-8111 chips
- and also for the nVidia nForce chip. This allows the kernel to
- change PIO, DMA and UDMA speeds and to configure the chip to
- optimum performance.
-
-config BLK_DEV_ATIIXP
- tristate "ATI IXP chipset IDE support"
- depends on X86
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for ATI IXP chipset.
- This allows the kernel to change PIO, DMA and UDMA speeds
- and to configure the chip to optimum performance.
-
- Say Y here if you have an ATI IXP chipset IDE controller.
-
-config BLK_DEV_CMD64X
- tristate "CMD64{3|6|8|9} chipset support"
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- Say Y here if you have an IDE controller which uses any of these
- chipsets: CMD643, CMD646, or CMD648.
-
-config BLK_DEV_TRIFLEX
- tristate "Compaq Triflex IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- Say Y here if you have a Compaq Triflex IDE controller, such
- as those commonly found on Compaq Pentium-Pro systems
-
-config BLK_DEV_CY82C693
- tristate "CY82C693 chipset support"
- depends on ALPHA
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds detection and support for the CY82C693 chipset
- used on Digital's PC-Alpha 164SX boards.
-
-config BLK_DEV_CS5520
- tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
- depends on X86_32 || COMPILE_TEST
- select BLK_DEV_IDEDMA_PCI
- help
- Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
- 5510/5520 chipset. This will automatically be detected and
- configured if found.
-
- It is safe to say Y to this question.
-
-config BLK_DEV_CS5530
- tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
- depends on X86_32 || COMPILE_TEST
- select BLK_DEV_IDEDMA_PCI
- help
- Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
- will automatically be detected and configured if found.
-
- It is safe to say Y to this question.
-
-config BLK_DEV_CS5535
- tristate "AMD CS5535 chipset support"
- depends on X86_32
- select BLK_DEV_IDEDMA_PCI
- help
- Include support for UDMA on the NSC/AMD CS5535 companion chipset.
- This will automatically be detected and configured if found.
-
- It is safe to say Y to this question.
-
-config BLK_DEV_CS5536
- tristate "CS5536 chipset support"
- depends on X86_32
- select BLK_DEV_IDEDMA_PCI
- help
- This option enables support for the AMD CS5536
- companion chip used with the Geode LX processor family.
-
- If unsure, say N.
-
-config BLK_DEV_HPT366
- tristate "HPT36X/37X chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- HPT366 is an Ultra DMA chipset for ATA-66.
- HPT368 is an Ultra DMA chipset for ATA-66 RAID Based.
- HPT370 is an Ultra DMA chipset for ATA-100.
- HPT372 is an Ultra DMA chipset for ATA-100.
- HPT374 is an Ultra DMA chipset for ATA-100.
-
- This driver adds up to 4 more EIDE devices sharing a single
- interrupt.
-
- The HPT366 chipset in its current form is bootable. One solution
- for this problem are special LILO commands for redirecting the
- reference to device 0x80. The other solution is to say Y to "Boot
- off-board chipsets first support" (CONFIG_BLK_DEV_OFFBOARD) unless
- your mother board has the chipset natively mounted. Regardless one
- should use the fore mentioned option and call at LILO.
-
- This driver requires dynamic tuning of the chipset during the
- ide-probe at boot. It is reported to support DVD II drives, by the
- manufacturer.
-
-config BLK_DEV_JMICRON
- tristate "JMicron JMB36x support"
- select BLK_DEV_IDEDMA_PCI
- help
- Basic support for the JMicron ATA controllers. For full support
- use the libata drivers.
-
-config BLK_DEV_SC1200
- tristate "National SCx200 chipset support"
- depends on X86_32 || COMPILE_TEST
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the on-board IDE controller on the
- National SCx200 series of embedded x86 "Geode" systems.
-
-config BLK_DEV_PIIX
- tristate "Intel PIIX/ICH chipsets support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for Intel PIIX and ICH chips.
- This allows the kernel to change PIO, DMA and UDMA speeds and to
- configure the chip to optimum performance.
-
-config BLK_DEV_IT8172
- tristate "IT8172 IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the IDE controller on the
- IT8172 System Controller.
-
-config BLK_DEV_IT8213
- tristate "IT8213 IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the ITE 8213 IDE controller.
-
-config BLK_DEV_IT821X
- tristate "IT821X IDE support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for the ITE 8211 IDE controller and the
- IT 8212 IDE RAID controller in both RAID and pass-through mode.
-
-config BLK_DEV_NS87415
- tristate "NS87415 chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds detection and support for the NS87415 chip
- (used mainly on SPARC64 and PA-RISC machines).
-
- Please read the comments at the top of <file:drivers/ide/ns87415.c>.
-
-config BLK_DEV_PDC202XX_OLD
- tristate "PROMISE PDC202{46|62|65|67} support"
- select BLK_DEV_IDEDMA_PCI
- help
- Promise Ultra33 or PDC20246
- Promise Ultra66 or PDC20262
- Promise Ultra100 or PDC20265/PDC20267/PDC20268
-
- This driver adds up to 4 more EIDE devices sharing a single
- interrupt. This add-on card is a bootable PCI UDMA controller. Since
- multiple cards can be installed and there are BIOS ROM problems that
- happen if the BIOS revisions of all installed cards (three-max) do
- not match, the driver attempts to do dynamic tuning of the chipset
- at boot-time for max-speed. Ultra33 BIOS 1.25 or newer is required
- for more than one card.
-
- Please read the comments at the top of
- <file:drivers/ide/pdc202xx_old.c>.
-
- If unsure, say N.
-
-config BLK_DEV_PDC202XX_NEW
- tristate "PROMISE PDC202{68|69|70|71|75|76|77} support"
- select BLK_DEV_IDEDMA_PCI
-
-config BLK_DEV_SVWKS
- tristate "ServerWorks OSB4/CSB5/CSB6 chipsets support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds PIO/(U)DMA support for the ServerWorks OSB4/CSB5
- chipsets.
-
-config BLK_DEV_SIIMAGE
- tristate "Silicon Image chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds PIO/(U)DMA support for the SI CMD680 and SII
- 3112 (Serial ATA) chips.
-
-config BLK_DEV_SIS5513
- tristate "SiS5513 chipset support"
- depends on X86
- select BLK_DEV_IDEDMA_PCI
- help
- This driver ensures (U)DMA support for SIS5513 chipset family based
- mainboards.
-
- The following chipsets are supported:
- ATA16: SiS5511, SiS5513
- ATA33: SiS5591, SiS5597, SiS5598, SiS5600
- ATA66: SiS530, SiS540, SiS620, SiS630, SiS640
- ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
- SiS745, SiS750
-
- Please read the comments at the top of <file:drivers/ide/sis5513.c>.
-
-config BLK_DEV_SL82C105
- tristate "Winbond SL82c105 support"
- depends on (PPC || ARM)
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- If you have a Winbond SL82c105 IDE controller, say Y here to enable
- special configuration for this chip. This is common on various CHRP
- motherboards, but could be used elsewhere. If in doubt, say Y.
-
-config BLK_DEV_SLC90E66
- tristate "SLC90E66 chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver ensures (U)DMA support for Victory66 SouthBridges for
- SMsC with Intel NorthBridges. This is an Ultra66 based chipset.
- The nice thing about it is that you can mix Ultra/DMA/PIO devices
- and it will handle timing cycles. Since this is an improved
- look-a-like to the PIIX4 it should be a nice addition.
-
- Please read the comments at the top of
- <file:drivers/ide/slc90e66.c>.
-
-config BLK_DEV_TRM290
- tristate "Tekram TRM290 chipset support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for bus master DMA transfers
- using the Tekram TRM290 PCI IDE chip. Volunteers are
- needed for further tweaking and development.
- Please read the comments at the top of <file:drivers/ide/trm290.c>.
-
-config BLK_DEV_VIA82CXXX
- tristate "VIA82CXXX chipset support"
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds explicit support for VIA BusMastering IDE chips.
- This allows the kernel to change PIO, DMA and UDMA speeds and to
- configure the chip to optimum performance.
-
-config BLK_DEV_TC86C001
- tristate "Toshiba TC86C001 support"
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds support for Toshiba TC86C001 GOKU-S chip.
-
-endif
-
-# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
-config BLK_DEV_IDE_PMAC
- tristate "PowerMac on-board IDE support"
- depends on PPC_PMAC
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_PCI
- help
- This driver provides support for the on-board IDE controller on
- most of the recent Apple Power Macintoshes and PowerBooks.
- If unsure, say Y.
-
-config BLK_DEV_IDE_PMAC_ATA100FIRST
- bool "Probe on-board ATA/100 (Kauai) first"
- depends on BLK_DEV_IDE_PMAC
- help
- This option will cause the ATA/100 controller found in UniNorth2
- based machines (Windtunnel PowerMac, Aluminium PowerBooks, ...)
- to be probed before the ATA/66 and ATA/33 controllers. Without
- these, those machine used to have the hard disk on hdc and the
- CD-ROM on hda. This option changes this to more natural hda for
- hard disk and hdc for CD-ROM.
-
-config BLK_DEV_IDE_TX4938
- tristate "TX4938 internal IDE support"
- depends on SOC_TX4938
- select IDE_TIMINGS
-
-config BLK_DEV_IDE_TX4939
- tristate "TX4939 internal IDE support"
- depends on SOC_TX4939
- select BLK_DEV_IDEDMA_SFF
-
-config BLK_DEV_IDE_ICSIDE
- tristate "ICS IDE interface support"
- depends on ARM && ARCH_ACORN
- help
- On Acorn systems, say Y here if you wish to use the ICS IDE
- interface card. This is not required for ICS partition support.
- If you are unsure, say N to this.
-
-config BLK_DEV_IDEDMA_ICS
- bool "ICS DMA support"
- depends on BLK_DEV_IDE_ICSIDE
- help
- Say Y here if you want to add DMA (Direct Memory Access) support to
- the ICS IDE driver.
-
-config BLK_DEV_IDE_RAPIDE
- tristate "RapIDE interface support"
- depends on ARM && ARCH_ACORN
- help
- Say Y here if you want to support the Yellowstone RapIDE controller
- manufactured for use with Acorn computers.
-
-config BLK_DEV_GAYLE
- tristate "Amiga Gayle IDE interface support"
- depends on AMIGA
- help
- This is the IDE driver for the Amiga Gayle IDE interface. It supports
- both the `A1200 style' and `A4000 style' of the Gayle IDE interface,
- This includes on-board IDE interfaces on some Amiga models (A600,
- A1200, A4000, and A4000T), and IDE interfaces on the Zorro expansion
- bus (M-Tech E-Matrix 530 expansion card).
-
- It also provides support for the so-called `IDE doublers' (made
- by various manufacturers, e.g. Eyetech) that can be connected to
- the on-board IDE interface of some Amiga models. Using such an IDE
- doubler, you can connect up to four instead of two IDE devices to
- the Amiga's on-board IDE interface. The feature is enabled at kernel
- runtime using the "gayle.doubler" kernel boot parameter.
-
- Say Y if you have an Amiga with a Gayle IDE interface and want to use
- IDE devices (hard disks, CD-ROM drives, etc.) that are connected to
- it.
-
- Note that you also have to enable Zorro bus support if you want to
- use Gayle IDE interfaces on the Zorro expansion bus.
-
-config BLK_DEV_BUDDHA
- tristate "Buddha/Catweasel/X-Surf IDE interface support"
- depends on ZORRO
- help
- This is the IDE driver for the IDE interfaces on the Buddha, Catweasel
- and X-Surf expansion boards. It supports up to two interfaces on the
- Buddha, three on the Catweasel and two on the X-Surf.
-
- Say Y if you have a Buddha or Catweasel expansion board and want to
- use IDE devices (hard disks, CD-ROM drives, etc.) that are connected
- to one of its IDE interfaces.
-
-config BLK_DEV_FALCON_IDE
- tristate "Falcon IDE interface support"
- depends on ATARI
- help
- This is the IDE driver for the on-board IDE interface on the Atari
- Falcon. Say Y if you have a Falcon and want to use IDE devices (hard
- disks, CD-ROM drives, etc.) that are connected to the on-board IDE
- interface.
-
-config BLK_DEV_MAC_IDE
- tristate "Macintosh Quadra/Powerbook IDE interface support"
- depends on MAC
- help
- This is the IDE driver for the on-board IDE interface on some m68k
- Macintosh models, namely Quadra/Centris 630, Performa 588 and
- Powerbook 150. The IDE interface on the Powerbook 190 is not
- supported by this driver and requires BLK_DEV_PLATFORM or
- PATA_PLATFORM.
-
- Say Y if you have such an Macintosh model and want to use IDE
- devices (hard disks, CD-ROM drives, etc.) that are connected to the
- on-board IDE interface.
-
-config BLK_DEV_Q40IDE
- tristate "Q40/Q60 IDE interface support"
- depends on Q40
- help
- Enable the on-board IDE controller in the Q40/Q60. This should
- normally be on; disable it only if you are running a custom hard
- drive subsystem through an expansion card.
-
-config BLK_DEV_PALMCHIP_BK3710
- tristate "Palmchip bk3710 IDE controller support"
- depends on ARCH_DAVINCI
- select IDE_TIMINGS
- select BLK_DEV_IDEDMA_SFF
- help
- Say Y here if you want to support the onchip IDE controller on the
- TI DaVinci SoC
-
-# no isa -> no vlb
-if ISA && (ALPHA || X86 || MIPS)
-
-comment "Other IDE chipsets support"
-comment "Note: most of these also require special kernel boot parameters"
-
-config BLK_DEV_4DRIVES
- tristate "Generic 4 drives/port support"
- help
- Certain older chipsets, including the Tekram 690CD, use a single set
- of I/O ports at 0x1f0 to control up to four drives, instead of the
- customary two drives per port. Support for this can be enabled at
- runtime using the "ide-4drives.probe" kernel boot parameter if you
- say Y here.
-
-config BLK_DEV_ALI14XX
- tristate "ALI M14xx support"
- select IDE_TIMINGS
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "ali14xx.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
- I/O speeds to be set as well.
- See the files <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/ali14xx.c> for more info.
-
-config BLK_DEV_DTC2278
- tristate "DTC-2278 support"
- select IDE_XFER_MODE
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "dtc2278.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the DTC-2278 card, and permits faster I/O speeds to be set as
- well. See the <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/dtc2278.c> files for more info.
-
-config BLK_DEV_HT6560B
- tristate "Holtek HT6560B support"
- select IDE_TIMINGS
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "ht6560b.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the Holtek card, and permits faster I/O speeds to be set as well.
- See the <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/ht6560b.c> files for more info.
-
-config BLK_DEV_QD65XX
- tristate "QDI QD65xx support"
- select IDE_TIMINGS
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "qd65xx.probe" kernel
- boot parameter. It permits faster I/O speeds to be set. See the
- <file:Documentation/ide/ide.rst> and <file:drivers/ide/qd65xx.c>
- for more info.
-
-config BLK_DEV_UMC8672
- tristate "UMC-8672 support"
- select IDE_XFER_MODE
- select IDE_LEGACY
- help
- This driver is enabled at runtime using the "umc8672.probe" kernel
- boot parameter. It enables support for the secondary IDE interface
- of the UMC-8672, and permits faster I/O speeds to be set as well.
- See the files <file:Documentation/ide/ide.rst> and
- <file:drivers/ide/umc8672.c> for more info.
-
-endif
-
-config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_ICS
- select IDE_XFER_MODE
-
-endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
deleted file mode 100644
index 2605b3cdaf47..000000000000
--- a/drivers/ide/Makefile
+++ /dev/null
@@ -1,111 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# link order is important here
-#
-
-ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
- ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
- ide-io-std.o ide-eh.o
-
-# core IDE code
-ide-core-$(CONFIG_IDE_XFER_MODE) += ide-pio-blacklist.o ide-xfer-mode.o
-ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
-ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o
-ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
-ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
-ide-core-$(CONFIG_BLK_DEV_IDEDMA_SFF) += ide-dma-sff.o
-ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
-ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
-ide-core-$(CONFIG_IDE_LEGACY) += ide-legacy.o
-
-obj-$(CONFIG_IDE) += ide-core.o
-
-obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
-obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
-obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
-obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
-obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
-obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
-
-obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
-obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
-obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
-obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
-obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
-
-obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
-obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
-obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
-obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
-obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
-obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
-obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
-obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
-obj-$(CONFIG_BLK_DEV_CS5536) += cs5536.o
-obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
-obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
-obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
-obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
-obj-$(CONFIG_BLK_DEV_IT8172) += it8172.o
-obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
-obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
-obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
-obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
-obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
-obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
-obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
-obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
-obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
-obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
-obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
-obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
-obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
-obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
-obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
-obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
-
-# Must appear at the end of the block
-obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
-
-obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o
-
-obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
-
-obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
-
-obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
-obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
-
-ide-gd_mod-y += ide-gd.o
-ide-cd_mod-y += ide-cd.o ide-cd_ioctl.o ide-cd_verbose.o
-
-ifeq ($(CONFIG_IDE_GD_ATA), y)
- ide-gd_mod-y += ide-disk.o ide-disk_ioctl.o
-ifeq ($(CONFIG_IDE_PROC_FS), y)
- ide-gd_mod-y += ide-disk_proc.o
-endif
-endif
-
-ifeq ($(CONFIG_IDE_GD_ATAPI), y)
- ide-gd_mod-y += ide-floppy.o ide-floppy_ioctl.o
-ifeq ($(CONFIG_IDE_PROC_FS), y)
- ide-gd_mod-y += ide-floppy_proc.o
-endif
-endif
-
-obj-$(CONFIG_IDE_GD) += ide-gd_mod.o
-obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o
-obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o
-
-obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o
-
-obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o
-
-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
-obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
-obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
-
-obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
-obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
deleted file mode 100644
index 4c959ce41ba9..000000000000
--- a/drivers/ide/aec62xx.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "aec62xx"
-
-struct chipset_bus_clock_list_entry {
- u8 xfer_speed;
- u8 chipset_settings;
- u8 ultra_settings;
-};
-
-static const struct chipset_bus_clock_list_entry aec6xxx_33_base [] = {
- { XFER_UDMA_6, 0x31, 0x07 },
- { XFER_UDMA_5, 0x31, 0x06 },
- { XFER_UDMA_4, 0x31, 0x05 },
- { XFER_UDMA_3, 0x31, 0x04 },
- { XFER_UDMA_2, 0x31, 0x03 },
- { XFER_UDMA_1, 0x31, 0x02 },
- { XFER_UDMA_0, 0x31, 0x01 },
-
- { XFER_MW_DMA_2, 0x31, 0x00 },
- { XFER_MW_DMA_1, 0x31, 0x00 },
- { XFER_MW_DMA_0, 0x0a, 0x00 },
- { XFER_PIO_4, 0x31, 0x00 },
- { XFER_PIO_3, 0x33, 0x00 },
- { XFER_PIO_2, 0x08, 0x00 },
- { XFER_PIO_1, 0x0a, 0x00 },
- { XFER_PIO_0, 0x00, 0x00 },
- { 0, 0x00, 0x00 }
-};
-
-static const struct chipset_bus_clock_list_entry aec6xxx_34_base [] = {
- { XFER_UDMA_6, 0x41, 0x06 },
- { XFER_UDMA_5, 0x41, 0x05 },
- { XFER_UDMA_4, 0x41, 0x04 },
- { XFER_UDMA_3, 0x41, 0x03 },
- { XFER_UDMA_2, 0x41, 0x02 },
- { XFER_UDMA_1, 0x41, 0x01 },
- { XFER_UDMA_0, 0x41, 0x01 },
-
- { XFER_MW_DMA_2, 0x41, 0x00 },
- { XFER_MW_DMA_1, 0x42, 0x00 },
- { XFER_MW_DMA_0, 0x7a, 0x00 },
- { XFER_PIO_4, 0x41, 0x00 },
- { XFER_PIO_3, 0x43, 0x00 },
- { XFER_PIO_2, 0x78, 0x00 },
- { XFER_PIO_1, 0x7a, 0x00 },
- { XFER_PIO_0, 0x70, 0x00 },
- { 0, 0x00, 0x00 }
-};
-
-/*
- * TO DO: active tuning and correction of cards without a bios.
- */
-static u8 pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
-{
- for ( ; chipset_table->xfer_speed ; chipset_table++)
- if (chipset_table->xfer_speed == speed) {
- return chipset_table->chipset_settings;
- }
- return chipset_table->chipset_settings;
-}
-
-static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entry * chipset_table)
-{
- for ( ; chipset_table->xfer_speed ; chipset_table++)
- if (chipset_table->xfer_speed == speed) {
- return chipset_table->ultra_settings;
- }
- return chipset_table->ultra_settings;
-}
-
-static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
- u16 d_conf = 0;
- u8 ultra = 0, ultra_conf = 0;
- u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
- const u8 speed = drive->dma_mode;
- unsigned long flags;
-
- local_irq_save(flags);
- /* 0x40|(2*drive->dn): Active, 0x41|(2*drive->dn): Recovery */
- pci_read_config_word(dev, 0x40|(2*drive->dn), &d_conf);
- tmp0 = pci_bus_clock_list(speed, bus_clock);
- d_conf = ((tmp0 & 0xf0) << 4) | (tmp0 & 0xf);
- pci_write_config_word(dev, 0x40|(2*drive->dn), d_conf);
-
- tmp1 = 0x00;
- tmp2 = 0x00;
- pci_read_config_byte(dev, 0x54, &ultra);
- tmp1 = ((0x00 << (2*drive->dn)) | (ultra & ~(3 << (2*drive->dn))));
- ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
- tmp2 = ((ultra_conf << (2*drive->dn)) | (tmp1 & ~(3 << (2*drive->dn))));
- pci_write_config_byte(dev, 0x54, tmp2);
- local_irq_restore(flags);
-}
-
-static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
- u8 unit = drive->dn & 1;
- u8 tmp1 = 0, tmp2 = 0;
- u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
- const u8 speed = drive->dma_mode;
- unsigned long flags;
-
- local_irq_save(flags);
- /* high 4-bits: Active, low 4-bits: Recovery */
- pci_read_config_byte(dev, 0x40|drive->dn, &drive_conf);
- drive_conf = pci_bus_clock_list(speed, bus_clock);
- pci_write_config_byte(dev, 0x40|drive->dn, drive_conf);
-
- pci_read_config_byte(dev, (0x44|hwif->channel), &ultra);
- tmp1 = ((0x00 << (4*unit)) | (ultra & ~(7 << (4*unit))));
- ultra_conf = pci_bus_clock_list_ultra(speed, bus_clock);
- tmp2 = ((ultra_conf << (4*unit)) | (tmp1 & ~(7 << (4*unit))));
- pci_write_config_byte(dev, (0x44|hwif->channel), tmp2);
- local_irq_restore(flags);
-}
-
-static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- hwif->port_ops->set_dma_mode(hwif, drive);
-}
-
-static int init_chipset_aec62xx(struct pci_dev *dev)
-{
- /* These are necessary to get AEC6280 Macintosh cards to work */
- if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) ||
- (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) {
- u8 reg49h = 0, reg4ah = 0;
- /* Clear reset and test bits. */
- pci_read_config_byte(dev, 0x49, &reg49h);
- pci_write_config_byte(dev, 0x49, reg49h & ~0x30);
- /* Enable chip interrupt output. */
- pci_read_config_byte(dev, 0x4a, &reg4ah);
- pci_write_config_byte(dev, 0x4a, reg4ah & ~0x01);
- /* Enable burst mode. */
- pci_read_config_byte(dev, 0x4a, &reg4ah);
- pci_write_config_byte(dev, 0x4a, reg4ah | 0x80);
- }
-
- return 0;
-}
-
-static u8 atp86x_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01;
-
- pci_read_config_byte(dev, 0x49, &ata66);
-
- return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static const struct ide_port_ops atp850_port_ops = {
- .set_pio_mode = aec_set_pio_mode,
- .set_dma_mode = aec6210_set_mode,
-};
-
-static const struct ide_port_ops atp86x_port_ops = {
- .set_pio_mode = aec_set_pio_mode,
- .set_dma_mode = aec6260_set_mode,
- .cable_detect = atp86x_cable_detect,
-};
-
-static const struct ide_port_info aec62xx_chipsets[] = {
- { /* 0: AEC6210 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .port_ops = &atp850_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_NO_DSC |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
- },
- { /* 1: AEC6260 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
- },
- { /* 2: AEC6260R */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_NON_BOOTABLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
- },
- { /* 3: AEC6280 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 4: AEC6280R */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .port_ops = &atp86x_port_ops,
- .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- }
-};
-
-/**
- * aec62xx_init_one - called when a AEC is found
- * @dev: the aec62xx device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- *
- * NOTE: since we're going to modify the 'name' field for AEC-6[26]80[R]
- * chips, pass a local copy of 'struct ide_port_info' down the call chain.
- */
-
-static int aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct chipset_bus_clock_list_entry *bus_clock;
- struct ide_port_info d;
- u8 idx = id->driver_data;
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- int err;
-
- if (bus_speed <= 33)
- bus_clock = aec6xxx_33_base;
- else
- bus_clock = aec6xxx_34_base;
-
- err = pci_enable_device(dev);
- if (err)
- return err;
-
- d = aec62xx_chipsets[idx];
-
- if (idx == 3 || idx == 4) {
- unsigned long dma_base = pci_resource_start(dev, 4);
-
- if (inb(dma_base + 2) & 0x10) {
- printk(KERN_INFO DRV_NAME " %s: AEC6880%s card detected"
- "\n", pci_name(dev), (idx == 4) ? "R" : "");
- d.udma_mask = ATA_UDMA6;
- }
- }
-
- err = ide_pci_init_one(dev, &d, (void *)bus_clock);
- if (err)
- pci_disable_device(dev);
-
- return err;
-}
-
-static void aec62xx_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id aec62xx_pci_tbl[] = {
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R), 2 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP865), 3 },
- { PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R), 4 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl);
-
-static struct pci_driver aec62xx_pci_driver = {
- .name = "AEC62xx_IDE",
- .id_table = aec62xx_pci_tbl,
- .probe = aec62xx_init_one,
- .remove = aec62xx_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init aec62xx_ide_init(void)
-{
- return ide_pci_register_driver(&aec62xx_pci_driver);
-}
-
-static void __exit aec62xx_ide_exit(void)
-{
- pci_unregister_driver(&aec62xx_pci_driver);
-}
-
-module_init(aec62xx_ide_init);
-module_exit(aec62xx_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for ARTOP AEC62xx IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
deleted file mode 100644
index 3268931c2c7a..000000000000
--- a/drivers/ide/ali14xx.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996 Linus Torvalds & author (see below)
- */
-
-/*
- * ALI M14xx chipset EIDE controller
- *
- * Works for ALI M1439/1443/1445/1487/1489 chipsets.
- *
- * Adapted from code developed by derekn@vw.ece.cmu.edu. -ml
- * Derek's notes follow:
- *
- * I think the code should be pretty understandable,
- * but I'll be happy to (try to) answer questions.
- *
- * The critical part is in the setupDrive function. The initRegisters
- * function doesn't seem to be necessary, but the DOS driver does it, so
- * I threw it in.
- *
- * I've only tested this on my system, which only has one disk. I posted
- * it to comp.sys.linux.hardware, so maybe some other people will try it
- * out.
- *
- * Derek Noonburg (derekn@ece.cmu.edu)
- * 95-sep-26
- *
- * Update 96-jul-13:
- *
- * I've since upgraded to two disks and a CD-ROM, with no trouble, and
- * I've also heard from several others who have used it successfully.
- * This driver appears to work with both the 1443/1445 and the 1487/1489
- * chipsets. I've added support for PIO mode 4 for the 1487. This
- * seems to work just fine on the 1443 also, although I'm not sure it's
- * advertised as supporting mode 4. (I've been running a WDC AC21200 in
- * mode 4 for a while now with no trouble.) -Derek
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "ali14xx"
-
-/* port addresses for auto-detection */
-#define ALI_NUM_PORTS 4
-static const int ports[ALI_NUM_PORTS] __initconst =
- { 0x074, 0x0f4, 0x034, 0x0e4 };
-
-/* register initialization data */
-typedef struct { u8 reg, data; } RegInitializer;
-
-static const RegInitializer initData[] __initconst = {
- {0x01, 0x0f}, {0x02, 0x00}, {0x03, 0x00}, {0x04, 0x00},
- {0x05, 0x00}, {0x06, 0x00}, {0x07, 0x2b}, {0x0a, 0x0f},
- {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x00}, {0x28, 0x00},
- {0x29, 0x00}, {0x2a, 0x00}, {0x2f, 0x00}, {0x2b, 0x00},
- {0x2c, 0x00}, {0x2d, 0x00}, {0x2e, 0x00}, {0x30, 0x00},
- {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, {0x34, 0xff},
- {0x35, 0x03}, {0x00, 0x00}
-};
-
-/* timing parameter registers for each drive */
-static struct { u8 reg1, reg2, reg3, reg4; } regTab[4] = {
- {0x03, 0x26, 0x04, 0x27}, /* drive 0 */
- {0x05, 0x28, 0x06, 0x29}, /* drive 1 */
- {0x2b, 0x30, 0x2c, 0x31}, /* drive 2 */
- {0x2d, 0x32, 0x2e, 0x33}, /* drive 3 */
-};
-
-static int basePort; /* base port address */
-static int regPort; /* port for register number */
-static int dataPort; /* port for register data */
-static u8 regOn; /* output to base port to access registers */
-static u8 regOff; /* output to base port to close registers */
-
-/*------------------------------------------------------------------------*/
-
-/*
- * Read a controller register.
- */
-static inline u8 inReg(u8 reg)
-{
- outb_p(reg, regPort);
- return inb(dataPort);
-}
-
-/*
- * Write a controller register.
- */
-static void outReg(u8 data, u8 reg)
-{
- outb_p(reg, regPort);
- outb_p(data, dataPort);
-}
-
-static DEFINE_SPINLOCK(ali14xx_lock);
-
-/*
- * Set PIO mode for the specified drive.
- * This function computes timing parameters
- * and sets controller registers accordingly.
- */
-static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int driveNum;
- int time1, time2;
- u8 param1, param2, param3, param4;
- unsigned long flags;
- int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
-
- /* calculate timing, according to PIO mode */
- time1 = ide_pio_cycle_time(drive, pio);
- time2 = t->active;
- param3 = param1 = (time2 * bus_speed + 999) / 1000;
- param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1;
- if (pio < 3) {
- param3 += 8;
- param4 += 8;
- }
- printk(KERN_DEBUG "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n",
- drive->name, pio, time1, time2, param1, param2, param3, param4);
-
- /* stuff timing parameters into controller registers */
- driveNum = (drive->hwif->index << 1) + (drive->dn & 1);
- spin_lock_irqsave(&ali14xx_lock, flags);
- outb_p(regOn, basePort);
- outReg(param1, regTab[driveNum].reg1);
- outReg(param2, regTab[driveNum].reg2);
- outReg(param3, regTab[driveNum].reg3);
- outReg(param4, regTab[driveNum].reg4);
- outb_p(regOff, basePort);
- spin_unlock_irqrestore(&ali14xx_lock, flags);
-}
-
-/*
- * Auto-detect the IDE controller port.
- */
-static int __init findPort(void)
-{
- int i;
- u8 t;
- unsigned long flags;
-
- local_irq_save(flags);
- for (i = 0; i < ALI_NUM_PORTS; ++i) {
- basePort = ports[i];
- regOff = inb(basePort);
- for (regOn = 0x30; regOn <= 0x33; ++regOn) {
- outb_p(regOn, basePort);
- if (inb(basePort) == regOn) {
- regPort = basePort + 4;
- dataPort = basePort + 8;
- t = inReg(0) & 0xf0;
- outb_p(regOff, basePort);
- local_irq_restore(flags);
- if (t != 0x50)
- return 0;
- return 1; /* success */
- }
- }
- outb_p(regOff, basePort);
- }
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * Initialize controller registers with default values.
- */
-static int __init initRegisters(void)
-{
- const RegInitializer *p;
- u8 t;
- unsigned long flags;
-
- local_irq_save(flags);
- outb_p(regOn, basePort);
- for (p = initData; p->reg != 0; ++p)
- outReg(p->data, p->reg);
- outb_p(0x01, regPort);
- t = inb(regPort) & 0x01;
- outb_p(regOff, basePort);
- local_irq_restore(flags);
- return t;
-}
-
-static const struct ide_port_ops ali14xx_port_ops = {
- .set_pio_mode = ali14xx_set_pio_mode,
-};
-
-static const struct ide_port_info ali14xx_port_info = {
- .name = DRV_NAME,
- .chipset = ide_ali14xx,
- .port_ops = &ali14xx_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init ali14xx_probe(void)
-{
- printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
- basePort, regOn);
-
- /* initialize controller registers */
- if (!initRegisters()) {
- printk(KERN_ERR "ali14xx: Chip initialization failed.\n");
- return 1;
- }
-
- return ide_legacy_device_add(&ali14xx_port_info, 0);
-}
-
-static bool probe_ali14xx;
-
-module_param_named(probe, probe_ali14xx, bool, 0);
-MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
-
-static int __init ali14xx_init(void)
-{
- if (probe_ali14xx == 0)
- goto out;
-
- /* auto-detect IDE controller port */
- if (findPort()) {
- if (ali14xx_probe())
- return -ENODEV;
- return 0;
- }
- printk(KERN_ERR "ali14xx: not found.\n");
-out:
- return -ENODEV;
-}
-
-module_init(ali14xx_init);
-
-MODULE_AUTHOR("see local file");
-MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
deleted file mode 100644
index 3265970aee34..000000000000
--- a/drivers/ide/alim15x3.c
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
- * Copyright (C) 1998-2000 Michel Aubry, Maintainer
- * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
- * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
- *
- * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
- * May be copied or modified under the terms of the GNU General Public License
- * Copyright (C) 2002 Alan Cox
- * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
- * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * (U)DMA capable version of ali 1533/1543(C), 1535(D)
- *
- **********************************************************************
- * 9/7/99 --Parts from the above author are included and need to be
- * converted into standard interface, once I finish the thought.
- *
- * Recent changes
- * Don't use LBA48 mode on ALi <= 0xC4
- * Don't poke 0x79 with a non ALi northbridge
- * Don't flip undefined bits on newer chipsets (fix Fujitsu laptop hang)
- * Allow UDMA6 on revisions > 0xC4
- *
- * Documentation
- * Chipset documentation available under NDA only
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "alim15x3"
-
-/*
- * ALi devices are not plug in. Otherwise these static values would
- * need to go. They ought to go away anyway
- */
-
-static u8 m5229_revision;
-static u8 chip_is_1543c_e;
-static struct pci_dev *isa_dev;
-
-static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- int pio_fifo = 0x54 + hwif->channel;
- u8 fifo;
- int shift = 4 * (drive->dn & 1);
-
- pci_read_config_byte(pdev, pio_fifo, &fifo);
- fifo &= ~(0x0F << shift);
- fifo |= (on << shift);
- pci_write_config_byte(pdev, pio_fifo, fifo);
-}
-
-static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
- struct ide_timing *t, u8 ultra)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int port = hwif->channel ? 0x5c : 0x58;
- int udmat = 0x56 + hwif->channel;
- u8 unit = drive->dn & 1, udma;
- int shift = 4 * unit;
-
- /* Set up the UDMA */
- pci_read_config_byte(dev, udmat, &udma);
- udma &= ~(0x0F << shift);
- udma |= ultra << shift;
- pci_write_config_byte(dev, udmat, udma);
-
- if (t == NULL)
- return;
-
- t->setup = clamp_val(t->setup, 1, 8) & 7;
- t->act8b = clamp_val(t->act8b, 1, 8) & 7;
- t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
- t->active = clamp_val(t->active, 1, 8) & 7;
- t->recover = clamp_val(t->recover, 1, 16) & 15;
-
- pci_write_config_byte(dev, port, t->setup);
- pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
- pci_write_config_byte(dev, port + unit + 2,
- (t->active << 4) | t->recover);
-}
-
-/**
- * ali_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Program the controller for the given PIO mode.
- */
-
-static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- unsigned long T = 1000000 / bus_speed; /* PCI clock based */
- struct ide_timing t;
-
- ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
- if (pair) {
- struct ide_timing p;
-
- ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- if (pair->dma_mode) {
- ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- }
- }
-
- /*
- * PIO mode => ATA FIFO on, ATAPI FIFO off
- */
- ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);
-
- ali_program_timings(hwif, drive, &t, 0);
-}
-
-/**
- * ali_udma_filter - compute UDMA mask
- * @drive: IDE device
- *
- * Return available UDMA modes.
- *
- * The actual rules for the ALi are:
- * No UDMA on revisions <= 0x20
- * Disk only for revisions < 0xC2
- * Not WDC drives on M1543C-E (?)
- */
-
-static u8 ali_udma_filter(ide_drive_t *drive)
-{
- if (m5229_revision > 0x20 && m5229_revision < 0xC2) {
- if (drive->media != ide_disk)
- return 0;
- if (chip_is_1543c_e &&
- strstr((char *)&drive->id[ATA_ID_PROD], "WDC "))
- return 0;
- }
-
- return drive->hwif->ultra_mask;
-}
-
-/**
- * ali_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Configure the hardware for the desired IDE transfer mode.
- */
-
-static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- unsigned long T = 1000000 / bus_speed; /* PCI clock based */
- const u8 speed = drive->dma_mode;
- u8 tmpbyte = 0x00;
- struct ide_timing t;
-
- if (speed < XFER_UDMA_0) {
- ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
- if (pair) {
- struct ide_timing p;
-
- ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- if (pair->dma_mode) {
- ide_timing_compute(pair, pair->dma_mode,
- &p, T, 1);
- ide_timing_merge(&p, &t, &t,
- IDE_TIMING_SETUP | IDE_TIMING_8BIT);
- }
- }
- ali_program_timings(hwif, drive, &t, 0);
- } else {
- ali_program_timings(hwif, drive, NULL,
- udma_timing[speed - XFER_UDMA_0]);
- if (speed >= XFER_UDMA_3) {
- pci_read_config_byte(dev, 0x4b, &tmpbyte);
- tmpbyte |= 1;
- pci_write_config_byte(dev, 0x4b, tmpbyte);
- }
- }
-}
-
-/**
- * ali_dma_check - DMA check
- * @drive: target device
- * @cmd: command
- *
- * Returns 1 if the DMA cannot be performed, zero on success.
- */
-
-static int ali_dma_check(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (m5229_revision < 0xC2 && drive->media != ide_disk) {
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- return 1; /* try PIO instead of DMA */
- }
- return 0;
-}
-
-/**
- * init_chipset_ali15x3 - Initialise an ALi IDE controller
- * @dev: PCI device
- *
- * This function initializes the ALI IDE controller and where
- * appropriate also sets up the 1533 southbridge.
- */
-
-static int init_chipset_ali15x3(struct pci_dev *dev)
-{
- unsigned long flags;
- u8 tmpbyte;
- struct pci_dev *north = pci_get_slot(dev->bus, PCI_DEVFN(0,0));
-
- m5229_revision = dev->revision;
-
- isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
- local_irq_save(flags);
-
- if (m5229_revision < 0xC2) {
- /*
- * revision 0x20 (1543-E, 1543-F)
- * revision 0xC0, 0xC1 (1543C-C, 1543C-D, 1543C-E)
- * clear CD-ROM DMA write bit, m5229, 0x4b, bit 7
- */
- pci_read_config_byte(dev, 0x4b, &tmpbyte);
- /*
- * clear bit 7
- */
- pci_write_config_byte(dev, 0x4b, tmpbyte & 0x7F);
- /*
- * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010
- */
- if (m5229_revision >= 0x20 && isa_dev) {
- pci_read_config_byte(isa_dev, 0x5e, &tmpbyte);
- chip_is_1543c_e = ((tmpbyte & 0x1e) == 0x12) ? 1: 0;
- }
- goto out;
- }
-
- /*
- * 1543C-B?, 1535, 1535D, 1553
- * Note 1: not all motherboards support this detection
- * Note 2: if no UDMA 66 device is attached the detection may give
- * a wrong result, but in that case we will not set the device
- * to Ultra 66 anyway, so the result does not matter
- */
-
- /*
- * enable "Cable Detection", m5229, 0x4b, bit3
- */
- pci_read_config_byte(dev, 0x4b, &tmpbyte);
- pci_write_config_byte(dev, 0x4b, tmpbyte | 0x08);
-
- /*
- * We should only tune the 1533 enable if we are using an ALi
- * north bridge. We might find no north bridge at all on some zany
- * box without a device at 0:0.0. The ALi bridge will be at
- * 0:0.0, so if we didn't find one we know what is cooking.
- */
- if (north && north->vendor != PCI_VENDOR_ID_AL)
- goto out;
-
- if (m5229_revision < 0xC5 && isa_dev)
- {
- /*
- * set south-bridge's enable bit, m1533, 0x79
- */
-
- pci_read_config_byte(isa_dev, 0x79, &tmpbyte);
- if (m5229_revision == 0xC2) {
- /*
- * 1543C-B0 (m1533, 0x79, bit 2)
- */
- pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x04);
- } else if (m5229_revision >= 0xC3) {
- /*
- * 1553/1535 (m1533, 0x79, bit 1)
- */
- pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x02);
- }
- }
-
-out:
- /*
- * CD_ROM DMA on (m5229, 0x53, bit0)
- * Enable this bit even if we want to use PIO.
- * PIO FIFO off (m5229, 0x53, bit1)
- * The hardware uses registers 0x54 and 0x55 to control the PIO FIFO.
- * (Not on later devices it seems)
- *
- * 0x53 changes meaning on later revs - we must not touch
- * bit 1 on them. Need to check if 0x20 is the right break.
- */
- if (m5229_revision >= 0x20) {
- pci_read_config_byte(dev, 0x53, &tmpbyte);
-
- if (m5229_revision <= 0x20)
- tmpbyte = (tmpbyte & (~0x02)) | 0x01;
- else if (m5229_revision == 0xc7 || m5229_revision == 0xc8)
- tmpbyte |= 0x03;
- else
- tmpbyte |= 0x01;
-
- pci_write_config_byte(dev, 0x53, tmpbyte);
- }
- local_irq_restore(flags);
- pci_dev_put(north);
- pci_dev_put(isa_dev);
- return 0;
-}
-
-/*
- * Cable special cases
- */
-
-static const struct dmi_system_id cable_dmi_table[] = {
- {
- .ident = "HP Pavilion N5430",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
- },
- },
- {
- .ident = "Toshiba Satellite S1800-814",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
- },
- },
- { }
-};
-
-static int ali_cable_override(struct pci_dev *pdev)
-{
- /* Fujitsu P2000 */
- if (pdev->subsystem_vendor == 0x10CF &&
- pdev->subsystem_device == 0x10AF)
- return 1;
-
- /* Mitac 8317 (Winbook-A) and relatives */
- if (pdev->subsystem_vendor == 0x1071 &&
- pdev->subsystem_device == 0x8317)
- return 1;
-
- /* Systems by DMI */
- if (dmi_check_system(cable_dmi_table))
- return 1;
-
- return 0;
-}
-
-/**
- * ali_cable_detect - cable detection
- * @hwif: IDE interface
- *
- * This checks if the controller and the cable are capable
- * of UDMA66 transfers. It doesn't check the drives.
- */
-
-static u8 ali_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 cbl = ATA_CBL_PATA40, tmpbyte;
-
- if (m5229_revision >= 0xC2) {
- /*
- * m5229 80-pin cable detection (from Host View)
- *
- * 0x4a bit0 is 0 => primary channel has 80-pin
- * 0x4a bit1 is 0 => secondary channel has 80-pin
- *
- * Certain laptops use short but suitable cables
- * and don't implement the detect logic.
- */
- if (ali_cable_override(dev))
- cbl = ATA_CBL_PATA40_SHORT;
- else {
- pci_read_config_byte(dev, 0x4a, &tmpbyte);
- if ((tmpbyte & (1 << hwif->channel)) == 0)
- cbl = ATA_CBL_PATA80;
- }
- }
-
- return cbl;
-}
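
The 0x4a decode above boils down to one active-low bit per channel. A tiny sketch of the same test outside the kernel (helper name and register value are just examples):

#include <stdio.h>

/* ALi 0x4a: bit 0 clear = primary has an 80-wire cable, bit 1 = secondary */
static const char *ali_cable(unsigned char reg4a, int channel)
{
	return (reg4a & (1 << channel)) ? "40-wire" : "80-wire";
}

int main(void)
{
	unsigned char reg4a = 0x02;	/* example readout */

	printf("primary:   %s\n", ali_cable(reg4a, 0));
	printf("secondary: %s\n", ali_cable(reg4a, 1));
	return 0;
}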
-
-#ifndef CONFIG_SPARC64
-/**
- * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
- * @hwif: interface to configure
- *
- * Obtain the IRQ tables for an ALi based IDE solution on the PC
- * class platforms. This part of the code isn't applicable to the
- * Sparc systems.
- */
-
-static void init_hwif_ali15x3(ide_hwif_t *hwif)
-{
- u8 ideic, inmir;
- s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
- 1, 11, 0, 12, 0, 14, 0, 15 };
- int irq = -1;
-
- if (isa_dev) {
- /*
- * read IDE interface control
- */
- pci_read_config_byte(isa_dev, 0x58, &ideic);
-
- /* bit0, bit1 */
- ideic = ideic & 0x03;
-
- /* get IRQ for IDE Controller */
- if ((hwif->channel && ideic == 0x03) ||
- (!hwif->channel && !ideic)) {
- /*
- * get SIRQ1 routing table
- */
- pci_read_config_byte(isa_dev, 0x44, &inmir);
- inmir = inmir & 0x0f;
- irq = irq_routing_table[inmir];
- } else if (hwif->channel && !(ideic & 0x01)) {
- /*
- * get SIRQ2 routing table
- */
- pci_read_config_byte(isa_dev, 0x75, &inmir);
- inmir = inmir & 0x0f;
- irq = irq_routing_table[inmir];
- }
- if(irq >= 0)
- hwif->irq = irq;
- }
-}
-#else
-#define init_hwif_ali15x3 NULL
-#endif /* CONFIG_SPARC64 */
-
-/**
- * init_dma_ali15x3 - set up DMA on ALi15x3
- * @hwif: IDE interface
- * @d: IDE port info
- *
- * Set up the DMA functionality on the ALi 15x3.
- */
-
-static int init_dma_ali15x3(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = ide_pci_dma_base(hwif, d);
-
- if (base == 0)
- return -1;
-
- hwif->dma_base = base;
-
- if (ide_pci_check_simplex(hwif, d) < 0)
- return -1;
-
- if (ide_pci_set_master(dev, d->name) < 0)
- return -1;
-
- if (!hwif->channel)
- outb(inb(base + 2) & 0x60, base + 2);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
-
- return 0;
-}
-
-static const struct ide_port_ops ali_port_ops = {
- .set_pio_mode = ali_set_pio_mode,
- .set_dma_mode = ali_set_dma_mode,
- .udma_filter = ali_udma_filter,
- .cable_detect = ali_cable_detect,
-};
-
-static const struct ide_dma_ops ali_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_check = ali_dma_check,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info ali15x3_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_ali15x3,
- .init_hwif = init_hwif_ali15x3,
- .init_dma = init_dma_ali15x3,
- .port_ops = &ali_port_ops,
- .dma_ops = &sff_dma_ops,
- .pio_mask = ATA_PIO5,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-/**
- * alim15x3_init_one - set up an ALi15x3 IDE controller
- * @dev: PCI device to set up
- *
- * Perform the actual set up for an ALi15x3 that has been found by the
- * hot plug layer.
- */
-
-static int alim15x3_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct ide_port_info d = ali15x3_chipset;
- u8 rev = dev->revision, idx = id->driver_data;
-
- /* don't use LBA48 DMA on ALi devices before rev 0xC5 */
- if (rev <= 0xC4)
- d.host_flags |= IDE_HFLAG_NO_LBA48_DMA;
-
- if (rev >= 0x20) {
- if (rev == 0x20)
- d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
-
- if (rev < 0xC2)
- d.udma_mask = ATA_UDMA2;
- else if (rev == 0xC2 || rev == 0xC3)
- d.udma_mask = ATA_UDMA4;
- else if (rev == 0xC4)
- d.udma_mask = ATA_UDMA5;
- else
- d.udma_mask = ATA_UDMA6;
-
- d.dma_ops = &ali_dma_ops;
- } else {
- d.host_flags |= IDE_HFLAG_NO_DMA;
-
- d.mwdma_mask = d.swdma_mask = 0;
- }
-
- if (idx == 0)
- d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-
-static const struct pci_device_id alim15x3_pci_tbl[] = {
- { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), 0 },
- { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, alim15x3_pci_tbl);
-
-static struct pci_driver alim15x3_pci_driver = {
- .name = "ALI15x3_IDE",
- .id_table = alim15x3_pci_tbl,
- .probe = alim15x3_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init ali15x3_ide_init(void)
-{
- return ide_pci_register_driver(&alim15x3_pci_driver);
-}
-
-static void __exit ali15x3_ide_exit(void)
-{
- pci_unregister_driver(&alim15x3_pci_driver);
-}
-
-module_init(ali15x3_ide_init);
-module_exit(ali15x3_ide_exit);
-
-MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
deleted file mode 100644
index 7340597a373e..000000000000
--- a/drivers/ide/amd74xx.c
+++ /dev/null
@@ -1,343 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04
- * IDE driver for Linux.
- *
- * Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * Based on the work of:
- * Andre Hedrick
- */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "amd74xx"
-
-enum {
- AMD_IDE_CONFIG = 0x41,
- AMD_CABLE_DETECT = 0x42,
- AMD_DRIVE_TIMING = 0x48,
- AMD_8BIT_TIMING = 0x4e,
- AMD_ADDRESS_SETUP = 0x4c,
- AMD_UDMA_TIMING = 0x50,
-};
-
-static unsigned int amd_80w;
-static unsigned int amd_clock;
-
-static char *amd_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
-static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7 };
-
-static inline u8 amd_offset(struct pci_dev *dev)
-{
- return (dev->vendor == PCI_VENDOR_ID_NVIDIA) ? 0x10 : 0;
-}
-
-/*
- * amd_set_speed() writes timing values to the chipset registers
- */
-
-static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
- struct ide_timing *timing)
-{
- u8 t = 0, offset = amd_offset(dev);
-
- pci_read_config_byte(dev, AMD_ADDRESS_SETUP + offset, &t);
- t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(timing->setup, 1, 4) - 1) << ((3 - dn) << 1));
- pci_write_config_byte(dev, AMD_ADDRESS_SETUP + offset, t);
-
- pci_write_config_byte(dev, AMD_8BIT_TIMING + offset + (1 - (dn >> 1)),
- ((clamp_val(timing->act8b, 1, 16) - 1) << 4) | (clamp_val(timing->rec8b, 1, 16) - 1));
-
- pci_write_config_byte(dev, AMD_DRIVE_TIMING + offset + (3 - dn),
- ((clamp_val(timing->active, 1, 16) - 1) << 4) | (clamp_val(timing->recover, 1, 16) - 1));
-
- switch (udma_mask) {
- case ATA_UDMA2: t = timing->udma ? (0xc0 | (clamp_val(timing->udma, 2, 5) - 2)) : 0x03; break;
- case ATA_UDMA4: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 2, 10)]) : 0x03; break;
- case ATA_UDMA5: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 1, 10)]) : 0x03; break;
- case ATA_UDMA6: t = timing->udma ? (0xc0 | amd_cyc2udma[clamp_val(timing->udma, 1, 15)]) : 0x03; break;
- default: return;
- }
-
- if (timing->udma)
- pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t);
-}
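
The AMD_ADDRESS_SETUP byte written above holds one 2-bit setup field per drive, with drive dn at bit position (3 - dn) * 2, and the hardware stores the clock count minus one. A standalone sketch of that read-modify-write (made-up helper, illustrative values only):

#include <stdio.h>

/* Update the 2-bit address-setup field for drive dn (0-3) in one byte. */
static unsigned char set_setup_field(unsigned char reg, int dn, int clocks)
{
	int shift = (3 - dn) << 1;	/* drive 0 -> bits 7:6, drive 3 -> bits 1:0 */

	reg &= ~(3 << shift);		/* clear just this drive's field */
	reg |= (clocks - 1) << shift;	/* register holds count - 1 */
	return reg;
}

int main(void)
{
	unsigned char reg = 0xff;

	reg = set_setup_field(reg, 1, 3);	/* drive 1: 3 setup clocks */
	printf("address setup register: 0x%02x\n", reg);	/* prints 0xef */
	return 0;
}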
-
-/*
- * amd_set_drive() computes timing values and configures the chipset
- * to a desired transfer mode. It also can be called by upper layers.
- */
-
-static void amd_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- ide_drive_t *peer = ide_get_pair_dev(drive);
- struct ide_timing t, p;
- int T, UT;
- u8 udma_mask = hwif->ultra_mask;
- const u8 speed = drive->dma_mode;
-
- T = 1000000000 / amd_clock;
- UT = (udma_mask == ATA_UDMA2) ? T : (T / 2);
-
- ide_timing_compute(drive, speed, &t, T, UT);
-
- if (peer) {
- ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
- ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
- }
-
- if (speed == XFER_UDMA_5 && amd_clock <= 33333) t.udma = 1;
- if (speed == XFER_UDMA_6 && amd_clock <= 33333) t.udma = 15;
-
- amd_set_speed(dev, drive->dn, udma_mask, &t);
-}
-
-/*
- * amd_set_pio_mode() is a callback from upper layers for PIO-only tuning.
- */
-
-static void amd_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- amd_set_drive(hwif, drive);
-}
-
-static void amd7409_cable_detect(struct pci_dev *dev)
-{
- /* no host side cable detection */
- amd_80w = 0x03;
-}
-
-static void amd7411_cable_detect(struct pci_dev *dev)
-{
- int i;
- u32 u = 0;
- u8 t = 0, offset = amd_offset(dev);
-
- pci_read_config_byte(dev, AMD_CABLE_DETECT + offset, &t);
- pci_read_config_dword(dev, AMD_UDMA_TIMING + offset, &u);
- amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0);
- for (i = 24; i >= 0; i -= 8)
- if (((u >> i) & 4) && !(amd_80w & (1 << (1 - (i >> 4))))) {
- printk(KERN_WARNING DRV_NAME " %s: BIOS didn't set "
- "cable bits correctly. Enabling workaround.\n",
- pci_name(dev));
- amd_80w |= (1 << (1 - (i >> 4)));
- }
-}
-
-/*
- * The initialization callback. Initialize drive-independent registers.
- */
-
-static int init_chipset_amd74xx(struct pci_dev *dev)
-{
- u8 t = 0, offset = amd_offset(dev);
-
-/*
- * Check 80-wire cable presence.
- */
-
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
- ; /* no UDMA > 2 */
- else if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_VIPER_7409)
- amd7409_cable_detect(dev);
- else
- amd7411_cable_detect(dev);
-
-/*
- * Take care of prefetch & postwrite.
- */
-
- pci_read_config_byte(dev, AMD_IDE_CONFIG + offset, &t);
- /*
- * Check for broken FIFO support.
- */
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
- t &= 0x0f;
- else
- t |= 0xf0;
- pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t);
-
- return 0;
-}
-
-static u8 amd_cable_detect(ide_hwif_t *hwif)
-{
- if ((amd_80w >> hwif->channel) & 1)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops amd_port_ops = {
- .set_pio_mode = amd_set_pio_mode,
- .set_dma_mode = amd_set_drive,
- .cable_detect = amd_cable_detect,
-};
-
-#define IDE_HFLAGS_AMD \
- (IDE_HFLAG_PIO_NO_BLACKLIST | \
- IDE_HFLAG_POST_SET_MODE | \
- IDE_HFLAG_IO_32BIT | \
- IDE_HFLAG_UNMASK_IRQS)
-
-#define DECLARE_AMD_DEV(swdma, udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_amd74xx, \
- .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
- .port_ops = &amd_port_ops, \
- .host_flags = IDE_HFLAGS_AMD, \
- .pio_mask = ATA_PIO5, \
- .swdma_mask = swdma, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- }
-
-#define DECLARE_NV_DEV(udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_amd74xx, \
- .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
- .port_ops = &amd_port_ops, \
- .host_flags = IDE_HFLAGS_AMD, \
- .pio_mask = ATA_PIO5, \
- .swdma_mask = ATA_SWDMA2, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- }
-
-static const struct ide_port_info amd74xx_chipsets[] = {
- /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
- /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
- /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
- /* 3: AMD8111 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA6),
-
- /* 4: NFORCE */ DECLARE_NV_DEV(ATA_UDMA5),
- /* 5: >= NFORCE2 */ DECLARE_NV_DEV(ATA_UDMA6),
-
- /* 6: AMD5536 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
-};
-
-static int amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d;
- u8 idx = id->driver_data;
-
- d = amd74xx_chipsets[idx];
-
- /*
- * Check for bad SWDMA and incorrectly wired Serenade mainboards.
- */
- if (idx == 1) {
- if (dev->revision <= 7)
- d.swdma_mask = 0;
- d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
- } else if (idx == 3) {
- if (dev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
- dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
- d.udma_mask = ATA_UDMA5;
- }
-
- /*
- * It seems that on some nVidia controllers using the AltStatus
- * register can be unreliable, so default to the Status register
- * if the device is in Compatibility Mode.
- */
- if (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
- ide_pci_is_in_compatibility_mode(dev))
- d.host_flags |= IDE_HFLAG_BROKEN_ALTSTATUS;
-
- printk(KERN_INFO "%s %s: UDMA%s controller\n",
- d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]);
-
- /*
- * Determine the system bus clock.
- */
- amd_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
-
- switch (amd_clock) {
- case 33000: amd_clock = 33333; break;
- case 37000: amd_clock = 37500; break;
- case 41000: amd_clock = 41666; break;
- }
-
- if (amd_clock < 20000 || amd_clock > 50000) {
- printk(KERN_WARNING "%s: User given PCI clock speed impossible"
- " (%d), using 33 MHz instead.\n",
- d.name, amd_clock);
- amd_clock = 33333;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id amd74xx_pci_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 2 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 2 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 3 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 4 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 5 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), 5 },
-#endif
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 5 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), 5 },
-#endif
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 5 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 5 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 6 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
-
-static struct pci_driver amd74xx_pci_driver = {
- .name = "AMD_IDE",
- .id_table = amd74xx_pci_tbl,
- .probe = amd74xx_probe,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init amd74xx_ide_init(void)
-{
- return ide_pci_register_driver(&amd74xx_pci_driver);
-}
-
-static void __exit amd74xx_ide_exit(void)
-{
- pci_unregister_driver(&amd74xx_pci_driver);
-}
-
-module_init(amd74xx_ide_init);
-module_exit(amd74xx_ide_exit);
-
-MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("AMD PCI IDE driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
deleted file mode 100644
index e08b0aac08b9..000000000000
--- a/drivers/ide/atiixp.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
- * Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "atiixp"
-
-#define ATIIXP_IDE_PIO_TIMING 0x40
-#define ATIIXP_IDE_MDMA_TIMING 0x44
-#define ATIIXP_IDE_PIO_CONTROL 0x48
-#define ATIIXP_IDE_PIO_MODE 0x4a
-#define ATIIXP_IDE_UDMA_CONTROL 0x54
-#define ATIIXP_IDE_UDMA_MODE 0x56
-
-struct atiixp_ide_timing {
- u8 command_width;
- u8 recover_width;
-};
-
-static struct atiixp_ide_timing pio_timing[] = {
- { 0x05, 0x0d },
- { 0x04, 0x07 },
- { 0x03, 0x04 },
- { 0x02, 0x02 },
- { 0x02, 0x00 },
-};
-
-static struct atiixp_ide_timing mdma_timing[] = {
- { 0x07, 0x07 },
- { 0x02, 0x01 },
- { 0x02, 0x00 },
-};
-
-static DEFINE_SPINLOCK(atiixp_lock);
-
-/**
- * atiixp_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Set the interface PIO mode.
- */
-
-static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags;
- int timing_shift = (drive->dn ^ 1) * 8;
- u32 pio_timing_data;
- u16 pio_mode_data;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- spin_lock_irqsave(&atiixp_lock, flags);
-
- pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
- pio_mode_data &= ~(0x07 << (drive->dn * 4));
- pio_mode_data |= (pio << (drive->dn * 4));
- pci_write_config_word(dev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
-
- pci_read_config_dword(dev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
- pio_timing_data &= ~(0xff << timing_shift);
- pio_timing_data |= (pio_timing[pio].recover_width << timing_shift) |
- (pio_timing[pio].command_width << (timing_shift + 4));
- pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
-
- spin_unlock_irqrestore(&atiixp_lock, flags);
-}
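
Two packings are in play above: the PIO mode word keeps a 4-bit field per drive at dn * 4, while the timing dword keeps an 8-bit command/recover pair per drive at (dn ^ 1) * 8, i.e. the two drives of a channel swap byte positions. A small sketch of the shift arithmetic only:

#include <stdio.h>

int main(void)
{
	int dn;

	for (dn = 0; dn < 4; dn++) {
		int mode_shift = dn * 4;		/* field in the PIO mode word */
		int timing_shift = (dn ^ 1) * 8;	/* byte in the PIO timing dword */

		printf("drive %d: mode bits %2d-%2d, timing bits %2d-%2d\n",
		       dn, mode_shift, mode_shift + 3,
		       timing_shift, timing_shift + 7);
	}
	return 0;
}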
-
-/**
- * atiixp_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Set an ATIIXP host controller to the desired DMA mode. This involves
- * programming the right timing data into the PCI configuration space.
- */
-
-static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags;
- int timing_shift = (drive->dn ^ 1) * 8;
- u32 tmp32;
- u16 tmp16;
- u16 udma_ctl = 0;
- const u8 speed = drive->dma_mode;
-
- spin_lock_irqsave(&atiixp_lock, flags);
-
- pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl);
-
- if (speed >= XFER_UDMA_0) {
- pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16);
- tmp16 &= ~(0x07 << (drive->dn * 4));
- tmp16 |= ((speed & 0x07) << (drive->dn * 4));
- pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16);
-
- udma_ctl |= (1 << drive->dn);
- } else if (speed >= XFER_MW_DMA_0) {
- u8 i = speed & 0x03;
-
- pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
- tmp32 &= ~(0xff << timing_shift);
- tmp32 |= (mdma_timing[i].recover_width << timing_shift) |
- (mdma_timing[i].command_width << (timing_shift + 4));
- pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
-
- udma_ctl &= ~(1 << drive->dn);
- }
-
- pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl);
-
- spin_unlock_irqrestore(&atiixp_lock, flags);
-}
-
-static u8 atiixp_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u8 udma_mode = 0, ch = hwif->channel;
-
- pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ch, &udma_mode);
-
- if ((udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops atiixp_port_ops = {
- .set_pio_mode = atiixp_set_pio_mode,
- .set_dma_mode = atiixp_set_dma_mode,
- .cable_detect = atiixp_cable_detect,
-};
-
-static const struct ide_port_info atiixp_pci_info[] = {
- { /* 0: IXP200/300/400/700 */
- .name = DRV_NAME,
- .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
- .port_ops = &atiixp_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 1: IXP600 */
- .name = DRV_NAME,
- .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
- .port_ops = &atiixp_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
-};
-
-/**
- * atiixp_init_one - called when an ATIIXP is found
- * @dev: the atiixp device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &atiixp_pci_info[id->driver_data], NULL);
-}
-
-static const struct pci_device_id atiixp_pci_tbl[] = {
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), 0 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), 0 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
- { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
-
-static struct pci_driver atiixp_pci_driver = {
- .name = "ATIIXP_IDE",
- .id_table = atiixp_pci_tbl,
- .probe = atiixp_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init atiixp_ide_init(void)
-{
- return ide_pci_register_driver(&atiixp_pci_driver);
-}
-
-static void __exit atiixp_ide_exit(void)
-{
- pci_unregister_driver(&atiixp_pci_driver);
-}
-
-module_init(atiixp_ide_init);
-module_exit(atiixp_ide_exit);
-
-MODULE_AUTHOR("HUI YU");
-MODULE_DESCRIPTION("PCI driver module for ATI IXP IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
deleted file mode 100644
index 46eaf58d881b..000000000000
--- a/drivers/ide/buddha.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Amiga Buddha, Catweasel and X-Surf IDE Driver
- *
- * Copyright (C) 1997, 2001 by Geert Uytterhoeven and others
- *
- * This driver was written based on the specifications in README.buddha and
- * the X-Surf info from Inside_XSurf.txt available at
- * http://www.jschoenfeld.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * TODO:
- * - test it :-)
- * - tune the timings using the speed-register
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/zorro.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-
-
- /*
- * The Buddha has 2 IDE interfaces, the Catweasel has 3, X-Surf has 2
- */
-
-#define BUDDHA_NUM_HWIFS 2
-#define CATWEASEL_NUM_HWIFS 3
-#define XSURF_NUM_HWIFS 2
-
-#define MAX_NUM_HWIFS 3
-
- /*
- * Bases of the IDE interfaces (relative to the board address)
- */
-
-#define BUDDHA_BASE1 0x800
-#define BUDDHA_BASE2 0xa00
-#define BUDDHA_BASE3 0xc00
-
-#define XSURF_BASE1 0xb000 /* 2.5" Interface */
-#define XSURF_BASE2 0xd000 /* 3.5" Interface */
-
-static u_int buddha_bases[CATWEASEL_NUM_HWIFS] __initdata = {
- BUDDHA_BASE1, BUDDHA_BASE2, BUDDHA_BASE3
-};
-
-static u_int xsurf_bases[XSURF_NUM_HWIFS] __initdata = {
- XSURF_BASE1, XSURF_BASE2
-};
-
- /*
- * Offsets from one of the above bases
- */
-
-#define BUDDHA_CONTROL 0x11a
-
- /*
- * Other registers
- */
-
-#define BUDDHA_IRQ1 0xf00 /* MSB = 1, Harddisk is source of */
-#define BUDDHA_IRQ2 0xf40 /* interrupt */
-#define BUDDHA_IRQ3 0xf80
-
-#define XSURF_IRQ1 0x7e
-#define XSURF_IRQ2 0x7e
-
-static int buddha_irqports[CATWEASEL_NUM_HWIFS] __initdata = {
- BUDDHA_IRQ1, BUDDHA_IRQ2, BUDDHA_IRQ3
-};
-
-static int xsurf_irqports[XSURF_NUM_HWIFS] __initdata = {
- XSURF_IRQ1, XSURF_IRQ2
-};
-
-#define BUDDHA_IRQ_MR 0xfc0 /* master interrupt enable */
-
-
- /*
- * Board information
- */
-
-typedef enum BuddhaType_Enum {
- BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF
-} BuddhaType;
-
-static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" };
-
- /*
- * Check and acknowledge the interrupt status
- */
-
-static int buddha_test_irq(ide_hwif_t *hwif)
-{
- unsigned char ch;
-
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & 0x80))
- return 0;
- return 1;
-}
-
-static void xsurf_clear_irq(ide_drive_t *drive)
-{
- /*
- * The X-Surf needs 0 written to the IRQ register to ensure ISA bit A11 stays at 0
- */
- z_writeb(0, drive->hwif->io_ports.irq_addr);
-}
-
-static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base,
- unsigned long ctl, unsigned long irq_port)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- hw->io_ports.data_addr = base;
-
- for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = base + 2 + i * 4;
-
- hw->io_ports.ctl_addr = ctl;
- hw->io_ports.irq_addr = irq_port;
-
- hw->irq = IRQ_AMIGA_PORTS;
-}
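
The loop above lays the task-file registers out at 4-byte spacing starting 2 bytes past the data port. A quick sketch of the resulting offsets for BUDDHA_BASE1 (0x800), purely to visualise the layout:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x800;	/* BUDDHA_BASE1 */
	int i;

	printf("data port      : 0x%03lx\n", base);
	for (i = 1; i < 8; i++)		/* error/feature ... status/command */
		printf("task file reg %d: 0x%03lx\n", i, base + 2 + i * 4);
	return 0;
}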
-
-static const struct ide_port_ops buddha_port_ops = {
- .test_irq = buddha_test_irq,
-};
-
-static const struct ide_port_ops xsurf_port_ops = {
- .clear_irq = xsurf_clear_irq,
- .test_irq = buddha_test_irq,
-};
-
-static const struct ide_port_info buddha_port_info = {
- .port_ops = &buddha_port_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
- /*
- * Probe for a Buddha or Catweasel IDE interface
- */
-
-static int __init buddha_init(void)
-{
- struct zorro_dev *z = NULL;
- u_long buddha_board = 0;
- BuddhaType type;
- int buddha_num_hwifs, i;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- unsigned long board;
- struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS];
- struct ide_port_info d = buddha_port_info;
-
- if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
- buddha_num_hwifs = BUDDHA_NUM_HWIFS;
- type=BOARD_BUDDHA;
- } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL) {
- buddha_num_hwifs = CATWEASEL_NUM_HWIFS;
- type=BOARD_CATWEASEL;
- } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF) {
- buddha_num_hwifs = XSURF_NUM_HWIFS;
- type=BOARD_XSURF;
- d.port_ops = &xsurf_port_ops;
- } else
- continue;
-
- board = z->resource.start;
-
- if(type != BOARD_XSURF) {
- if (!request_mem_region(board+BUDDHA_BASE1, 0x800, "IDE"))
- continue;
- } else {
- if (!request_mem_region(board+XSURF_BASE1, 0x1000, "IDE"))
- continue;
- if (!request_mem_region(board+XSURF_BASE2, 0x1000, "IDE"))
- goto fail_base2;
- if (!request_mem_region(board+XSURF_IRQ1, 0x8, "IDE")) {
- release_mem_region(board+XSURF_BASE2, 0x1000);
-fail_base2:
- release_mem_region(board+XSURF_BASE1, 0x1000);
- continue;
- }
- }
- buddha_board = (unsigned long)ZTWO_VADDR(board);
-
- /* write to BUDDHA_IRQ_MR to enable the board IRQ */
- /* X-Surf doesn't have this. IRQs are always on */
- if (type != BOARD_XSURF)
- z_writeb(0, buddha_board+BUDDHA_IRQ_MR);
-
- printk(KERN_INFO "ide: %s IDE controller\n",
- buddha_board_name[type]);
-
- for (i = 0; i < buddha_num_hwifs; i++) {
- unsigned long base, ctl, irq_port;
-
- if (type != BOARD_XSURF) {
- base = buddha_board + buddha_bases[i];
- ctl = base + BUDDHA_CONTROL;
- irq_port = buddha_board + buddha_irqports[i];
- } else {
- base = buddha_board + xsurf_bases[i];
- /* X-Surf has no CS1* (Control/AltStat) */
- ctl = 0;
- irq_port = buddha_board + xsurf_irqports[i];
- }
-
- buddha_setup_ports(&hw[i], base, ctl, irq_port);
-
- hws[i] = &hw[i];
- }
-
- ide_host_add(&d, hws, i, NULL);
- }
-
- return 0;
-}
-
-module_init(buddha_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
deleted file mode 100644
index f48decb9fac4..000000000000
--- a/drivers/ide/cmd640.c
+++ /dev/null
@@ -1,848 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-1996 Linus Torvalds & authors (see below)
- */
-
-/*
- * Original authors: abramov@cecmow.enet.dec.com (Igor Abramov)
- * mlord@pobox.com (Mark Lord)
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This file provides support for the advanced features and bugs
- * of IDE interfaces using the CMD Technologies 0640 IDE interface chip.
- *
- * These chips are basically fucked by design, and getting this driver
- * to work on every motherboard design that uses this screwed chip seems
- * bloody well impossible. However, we're still trying.
- *
- * Version 0.97 worked for everybody.
- *
- * User feedback is essential. Many thanks to the beta test team:
- *
- * A.Hartgers@stud.tue.nl, JZDQC@CUNYVM.CUNY.edu, abramov@cecmow.enet.dec.com,
- * bardj@utopia.ppp.sn.no, bart@gaga.tue.nl, bbol001@cs.auckland.ac.nz,
- * chrisc@dbass.demon.co.uk, dalecki@namu26.Num.Math.Uni-Goettingen.de,
- * derekn@vw.ece.cmu.edu, florian@btp2x3.phy.uni-bayreuth.de,
- * flynn@dei.unipd.it, gadio@netvision.net.il, godzilla@futuris.net,
- * j@pobox.com, jkemp1@mises.uni-paderborn.de, jtoppe@hiwaay.net,
- * kerouac@ssnet.com, meskes@informatik.rwth-aachen.de, hzoli@cs.elte.hu,
- * peter@udgaard.isgtec.com, phil@tazenda.demon.co.uk, roadcapw@cfw.com,
- * s0033las@sun10.vsz.bme.hu, schaffer@tam.cornell.edu, sjd@slip.net,
- * steve@ei.org, ulrpeg@bigcomm.gun.de, ism@tardis.ed.ac.uk, mack@cray.com
- * liug@mama.indstate.edu, and others.
- *
- * Version 0.01 Initial version, hacked out of ide.c,
- * and #include'd rather than compiled separately.
- * This will get cleaned up in a subsequent release.
- *
- * Version 0.02 Fixes for vlb initialization code, enable prefetch
- * for versions 'B' and 'C' of chip by default,
- * some code cleanup.
- *
- * Version 0.03 Added reset of secondary interface,
- * and black list for devices which are not compatible
- * with prefetch mode. Separate function for setting
- * prefetch is added, possibly it will be called some
- * day from ioctl processing code.
- *
- * Version 0.04 Now configs/compiles separate from ide.c
- *
- * Version 0.05 Major rewrite of interface timing code.
- * Added new function cmd640_set_mode to set PIO mode
- * from ioctl call. New drives added to black list.
- *
- * Version 0.06 More code cleanup. Prefetch is enabled only for
- * detected hard drives, not included in prefetch
- * black list.
- *
- * Version 0.07 Changed to more conservative drive tuning policy.
- * Unknown drives, which report PIO < 4 are set to
- * (reported_PIO - 1) if it is supported, or to PIO0.
- * List of known drives extended by info provided by
- * CMD at their ftp site.
- *
- * Version 0.08 Added autotune/noautotune support.
- *
- * Version 0.09 Try to be smarter about 2nd port enabling.
- * Version 0.10 Be nice and don't reset 2nd port.
- * Version 0.11 Try to handle more weird situations.
- *
- * Version 0.12 Lots of bug fixes from Laszlo Peter
- * irq unmasking disabled for reliability.
- * try to be even smarter about the second port.
- * tidy up source code formatting.
- * Version 0.13 permit irq unmasking again.
- * Version 0.90 massive code cleanup, some bugs fixed.
- * defaults all drives to PIO mode0, prefetch off.
- * autotune is OFF by default, with compile time flag.
- * prefetch can be turned OFF/ON using "hdparm -p8/-p9"
- * (requires hdparm-3.1 or newer)
- * Version 0.91 first release to linux-kernel list.
- * Version 0.92 move initial reg dump to separate callable function
- * change "readahead" to "prefetch" to avoid confusion
- * Version 0.95 respect original BIOS timings unless autotuning.
- * tons of code cleanup and rearrangement.
- * added CONFIG_BLK_DEV_CMD640_ENHANCED option
- * prevent use of unmask when prefetch is on
- * Version 0.96 prevent use of io_32bit when prefetch is off
- * Version 0.97 fix VLB secondary interface for sjd@slip.net
- * other minor tune-ups: 0.96 was very good.
- * Version 0.98 ignore PCI version when disabled by BIOS
- * Version 0.99 display setup/active/recovery clocks with PIO mode
- * Version 1.00 Mmm.. cannot depend on PCMD_ENA in all systems
- * Version 1.01 slow/fast devsel can be selected with "hdparm -p6/-p7"
- * ("fast" is necessary for 32bit I/O in some systems)
- * Version 1.02 fix bug that resulted in slow "setup times"
- * (patch courtesy of Zoltan Hidvegi)
- */
-
-#define CMD640_PREFETCH_MASKS 1
-
-/*#define CMD640_DUMP_REGS */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cmd640"
-
-static bool cmd640_vlb;
-
-/*
- * CMD640 specific registers definition.
- */
-
-#define VID 0x00
-#define DID 0x02
-#define PCMD 0x04
-#define PCMD_ENA 0x01
-#define PSTTS 0x06
-#define REVID 0x08
-#define PROGIF 0x09
-#define SUBCL 0x0a
-#define BASCL 0x0b
-#define BaseA0 0x10
-#define BaseA1 0x14
-#define BaseA2 0x18
-#define BaseA3 0x1c
-#define INTLINE 0x3c
-#define INPINE 0x3d
-
-#define CFR 0x50
-#define CFR_DEVREV 0x03
-#define CFR_IDE01INTR 0x04
-#define CFR_DEVID 0x18
-#define CFR_AT_VESA_078h 0x20
-#define CFR_DSA1 0x40
-#define CFR_DSA0 0x80
-
-#define CNTRL 0x51
-#define CNTRL_DIS_RA0 0x40
-#define CNTRL_DIS_RA1 0x80
-#define CNTRL_ENA_2ND 0x08
-
-#define CMDTIM 0x52
-#define ARTTIM0 0x53
-#define DRWTIM0 0x54
-#define ARTTIM1 0x55
-#define DRWTIM1 0x56
-#define ARTTIM23 0x57
-#define ARTTIM23_DIS_RA2 0x04
-#define ARTTIM23_DIS_RA3 0x08
-#define ARTTIM23_IDE23INTR 0x10
-#define DRWTIM23 0x58
-#define BRST 0x59
-
-/*
- * Registers and masks for easy access by drive index:
- */
-static u8 prefetch_regs[4] = {CNTRL, CNTRL, ARTTIM23, ARTTIM23};
-static u8 prefetch_masks[4] = {CNTRL_DIS_RA0, CNTRL_DIS_RA1, ARTTIM23_DIS_RA2, ARTTIM23_DIS_RA3};
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-
-static u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
-static u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM23, DRWTIM23};
-
-/*
- * Current cmd640 timing values for each drive.
- * The defaults for each are the slowest possible timings.
- */
-static u8 setup_counts[4] = {4, 4, 4, 4}; /* Address setup count (in clocks) */
-static u8 active_counts[4] = {16, 16, 16, 16}; /* Active count (encoded) */
-static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
-
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
-static DEFINE_SPINLOCK(cmd640_lock);
-
-/*
- * Interface to access cmd640x registers
- */
-static unsigned int cmd640_key;
-static void (*__put_cmd640_reg)(u16 reg, u8 val);
-static u8 (*__get_cmd640_reg)(u16 reg);
-
-/*
- * This is read from the CFR reg, and is used in several places.
- */
-static unsigned int cmd640_chip_version;
-
-/*
- * The CMD640x chip does not support DWORD config write cycles, but some
- * of the BIOSes use them to implement the config services.
- * Therefore, we must use direct IO instead.
- */
-
-/* PCI method 1 access */
-
-static void put_cmd640_reg_pci1(u16 reg, u8 val)
-{
- outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
- outb_p(val, (reg & 3) | 0xcfc);
-}
-
-static u8 get_cmd640_reg_pci1(u16 reg)
-{
- outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
- return inb_p((reg & 3) | 0xcfc);
-}
-
-/* PCI method 2 access (from CMD datasheet) */
-
-static void put_cmd640_reg_pci2(u16 reg, u8 val)
-{
- outb_p(0x10, 0xcf8);
- outb_p(val, cmd640_key + reg);
- outb_p(0, 0xcf8);
-}
-
-static u8 get_cmd640_reg_pci2(u16 reg)
-{
- u8 b;
-
- outb_p(0x10, 0xcf8);
- b = inb_p(cmd640_key + reg);
- outb_p(0, 0xcf8);
- return b;
-}
-
-/* VLB access */
-
-static void put_cmd640_reg_vlb(u16 reg, u8 val)
-{
- outb_p(reg, cmd640_key);
- outb_p(val, cmd640_key + 4);
-}
-
-static u8 get_cmd640_reg_vlb(u16 reg)
-{
- outb_p(reg, cmd640_key);
- return inb_p(cmd640_key + 4);
-}
-
-static u8 get_cmd640_reg(u16 reg)
-{
- unsigned long flags;
- u8 b;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- b = __get_cmd640_reg(reg);
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return b;
-}
-
-static void put_cmd640_reg(u16 reg, u8 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- __put_cmd640_reg(reg, val);
- spin_unlock_irqrestore(&cmd640_lock, flags);
-}
-
-static int __init match_pci_cmd640_device(void)
-{
- const u8 ven_dev[4] = {0x95, 0x10, 0x40, 0x06};
- unsigned int i;
- for (i = 0; i < 4; i++) {
- if (get_cmd640_reg(i) != ven_dev[i])
- return 0;
- }
-#ifdef STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT
- if ((get_cmd640_reg(PCMD) & PCMD_ENA) == 0) {
- printk("ide: cmd640 on PCI disabled by BIOS\n");
- return 0;
- }
-#endif /* STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT */
- return 1; /* success */
-}
-
-/*
- * Probe for CMD640x -- pci method 1
- */
-static int __init probe_for_cmd640_pci1(void)
-{
- __get_cmd640_reg = get_cmd640_reg_pci1;
- __put_cmd640_reg = put_cmd640_reg_pci1;
- for (cmd640_key = 0x80000000;
- cmd640_key <= 0x8000f800;
- cmd640_key += 0x800) {
- if (match_pci_cmd640_device())
- return 1; /* success */
- }
- return 0;
-}
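
The loop above is PCI configuration mechanism #1 done by hand: the dword written to 0xCF8 has bit 31 as the enable bit and bits 15:11 as the device number on bus 0, so stepping the key by 0x800 walks devices 0 through 31. A standalone decode of that address layout (standard mechanism #1 fields, shown only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long key;

	for (key = 0x80000000ul; key <= 0x80001000ul; key += 0x800) {
		unsigned int bus = (key >> 16) & 0xff;
		unsigned int dev = (key >> 11) & 0x1f;
		unsigned int fn  = (key >> 8) & 0x07;

		printf("key 0x%08lx -> bus %u, device %2u, function %u\n",
		       key, bus, dev, fn);
	}
	return 0;
}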
-
-/*
- * Probe for CMD640x -- pci method 2
- */
-static int __init probe_for_cmd640_pci2(void)
-{
- __get_cmd640_reg = get_cmd640_reg_pci2;
- __put_cmd640_reg = put_cmd640_reg_pci2;
- for (cmd640_key = 0xc000; cmd640_key <= 0xcf00; cmd640_key += 0x100) {
- if (match_pci_cmd640_device())
- return 1; /* success */
- }
- return 0;
-}
-
-/*
- * Probe for CMD640x -- vlb
- */
-static int __init probe_for_cmd640_vlb(void)
-{
- u8 b;
-
- __get_cmd640_reg = get_cmd640_reg_vlb;
- __put_cmd640_reg = put_cmd640_reg_vlb;
- cmd640_key = 0x178;
- b = get_cmd640_reg(CFR);
- if (b == 0xff || b == 0x00 || (b & CFR_AT_VESA_078h)) {
- cmd640_key = 0x78;
- b = get_cmd640_reg(CFR);
- if (b == 0xff || b == 0x00 || !(b & CFR_AT_VESA_078h))
- return 0;
- }
- return 1; /* success */
-}
-
-/*
- * Returns 1 if an IDE interface/drive exists at 0x170,
- * and 0 otherwise.
- */
-static int __init secondary_port_responding(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd640_lock, flags);
-
- outb_p(0x0a, 0x176); /* select drive0 */
- udelay(100);
- if ((inb_p(0x176) & 0x1f) != 0x0a) {
- outb_p(0x1a, 0x176); /* select drive1 */
- udelay(100);
- if ((inb_p(0x176) & 0x1f) != 0x1a) {
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 0; /* nothing responded */
- }
- }
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 1; /* success */
-}
-
-#ifdef CMD640_DUMP_REGS
-/*
- * Dump out all cmd640 registers. May be called from ide.c
- */
-static void cmd640_dump_regs(void)
-{
- unsigned int reg = cmd640_vlb ? 0x50 : 0x00;
-
- /* Dump current state of chip registers */
- printk("ide: cmd640 internal register dump:");
- for (; reg <= 0x59; reg++) {
- if (!(reg & 0x0f))
- printk("\n%04x:", reg);
- printk(" %02x", get_cmd640_reg(reg));
- }
- printk("\n");
-}
-#endif
-
-static void __set_prefetch_mode(ide_drive_t *drive, int mode)
-{
- if (mode) { /* want prefetch on? */
-#if CMD640_PREFETCH_MASKS
- drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
-#endif
- drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT;
- } else {
- drive->dev_flags &= ~IDE_DFLAG_NO_UNMASK;
- drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
- drive->io_32bit = 0;
- }
-}
-
-#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
-/*
- * Check whether prefetch is on for a drive,
- * and initialize the unmask flags for safe operation.
- */
-static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
-{
- u8 b = get_cmd640_reg(prefetch_regs[index]);
-
- __set_prefetch_mode(drive, (b & prefetch_masks[index]) ? 0 : 1);
-}
-#else
-
-/*
- * Sets prefetch mode for a drive.
- */
-static void set_prefetch_mode(ide_drive_t *drive, unsigned int index, int mode)
-{
- unsigned long flags;
- int reg = prefetch_regs[index];
- u8 b;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- b = __get_cmd640_reg(reg);
- __set_prefetch_mode(drive, mode);
- if (mode)
- b &= ~prefetch_masks[index]; /* enable prefetch */
- else
- b |= prefetch_masks[index]; /* disable prefetch */
- __put_cmd640_reg(reg, b);
- spin_unlock_irqrestore(&cmd640_lock, flags);
-}
-
-/*
- * Dump out current drive clocks settings
- */
-static void display_clocks(unsigned int index)
-{
- u8 active_count, recovery_count;
-
- active_count = active_counts[index];
- if (active_count == 1)
- ++active_count;
- recovery_count = recovery_counts[index];
- if (active_count > 3 && recovery_count == 1)
- ++recovery_count;
- if (cmd640_chip_version > 1)
- recovery_count += 1; /* cmd640b uses (count + 1)*/
- printk(", clocks=%d/%d/%d\n", setup_counts[index], active_count, recovery_count);
-}
-
-/*
- * Pack the active and recovery counts into the single-byte
- * representation used by the controller
- */
-static inline u8 pack_nibbles(u8 upper, u8 lower)
-{
- return ((upper & 0x0f) << 4) | (lower & 0x0f);
-}
-
-/*
- * This routine writes the prepared setup/active/recovery counts
- * for a drive into the cmd640 chipset registers to active them.
- */
-static void program_drive_counts(ide_drive_t *drive, unsigned int index)
-{
- unsigned long flags;
- u8 setup_count = setup_counts[index];
- u8 active_count = active_counts[index];
- u8 recovery_count = recovery_counts[index];
-
- /*
- * Set up address setup count and drive read/write timing registers.
- * Primary interface has individual count/timing registers for
- * each drive. Secondary interface has one common set of registers,
- * so we merge the timings, using the slowest value for each timing.
- */
- if (index > 1) {
- ide_drive_t *peer = ide_get_pair_dev(drive);
- unsigned int mate = index ^ 1;
-
- if (peer) {
- if (setup_count < setup_counts[mate])
- setup_count = setup_counts[mate];
- if (active_count < active_counts[mate])
- active_count = active_counts[mate];
- if (recovery_count < recovery_counts[mate])
- recovery_count = recovery_counts[mate];
- }
- }
-
- /*
- * Convert setup_count to internal chipset representation
- */
- switch (setup_count) {
- case 4: setup_count = 0x00; break;
- case 3: setup_count = 0x80; break;
- case 1:
- case 2: setup_count = 0x40; break;
- default: setup_count = 0xc0; /* case 5 */
- }
-
- /*
- * Now that everything is ready, program the new timings
- */
- spin_lock_irqsave(&cmd640_lock, flags);
- /*
- * Program the address_setup clocks into ARTTIM reg,
- * and then the active/recovery counts into the DRWTIM reg
- * (this converts counts of 16 into counts of zero -- okay).
- */
- setup_count |= __get_cmd640_reg(arttim_regs[index]) & 0x3f;
- __put_cmd640_reg(arttim_regs[index], setup_count);
- __put_cmd640_reg(drwtim_regs[index], pack_nibbles(active_count, recovery_count));
- spin_unlock_irqrestore(&cmd640_lock, flags);
-}
-
-/*
- * Set a specific pio_mode for a drive
- */
-static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
- u8 pio_mode, unsigned int cycle_time)
-{
- struct ide_timing *t;
- int setup_time, active_time, recovery_time, clock_time;
- u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
- int bus_speed;
-
- if (cmd640_vlb)
- bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
- else
- bus_speed = ide_pci_clk ? ide_pci_clk : 33;
-
- if (pio_mode > 5)
- pio_mode = 5;
-
- t = ide_timing_find_mode(XFER_PIO_0 + pio_mode);
- setup_time = t->setup;
- active_time = t->active;
-
- recovery_time = cycle_time - (setup_time + active_time);
- clock_time = 1000 / bus_speed;
- cycle_count = DIV_ROUND_UP(cycle_time, clock_time);
-
- setup_count = DIV_ROUND_UP(setup_time, clock_time);
-
- active_count = DIV_ROUND_UP(active_time, clock_time);
- if (active_count < 2)
- active_count = 2; /* minimum allowed by cmd640 */
-
- recovery_count = DIV_ROUND_UP(recovery_time, clock_time);
- recovery_count2 = cycle_count - (setup_count + active_count);
- if (recovery_count2 > recovery_count)
- recovery_count = recovery_count2;
- if (recovery_count < 2)
- recovery_count = 2; /* minimum allowed by cmd640 */
- if (recovery_count > 17) {
- active_count += recovery_count - 17;
- recovery_count = 17;
- }
- if (active_count > 16)
- active_count = 16; /* maximum allowed by cmd640 */
- if (cmd640_chip_version > 1)
- recovery_count -= 1; /* cmd640b uses (count + 1)*/
- if (recovery_count > 16)
- recovery_count = 16; /* maximum allowed by cmd640 */
-
- setup_counts[index] = setup_count;
- active_counts[index] = active_count;
- recovery_counts[index] = recovery_count;
-
- /*
- * In a perfect world, we might set the drive pio mode here
- * (using WIN_SETFEATURE) before continuing.
- *
- * But we do not, because:
- * 1) this is the wrong place to do it (the proper place is do_special() in ide.c)
- * 2) in practice this is rarely, if ever, necessary
- */
- program_drive_counts(drive, index);
-}
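
The count computation above is plain ceiling division: each timing in nanoseconds is divided by the bus clock period (1000 / bus_speed in MHz), and the recovery count also has to cover whatever is left of the full cycle after setup and active. A cut-down sketch of that arithmetic (example numbers, not taken from the driver's tables):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int bus_speed = 33;			/* MHz */
	int clock_time = 1000 / bus_speed;	/* ns per clock: 30 here */
	int setup = 70, active = 165, cycle = 600;	/* example timings in ns */

	int setup_count = DIV_ROUND_UP(setup, clock_time);
	int active_count = DIV_ROUND_UP(active, clock_time);
	int cycle_count = DIV_ROUND_UP(cycle, clock_time);
	int recovery_count = cycle_count - (setup_count + active_count);

	if (recovery_count < 2)
		recovery_count = 2;	/* minimum the chip accepts */

	printf("setup=%d active=%d recovery=%d clocks\n",
	       setup_count, active_count, recovery_count);
	return 0;
}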
-
-static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned int index = 0, cycle_time;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 b;
-
- switch (pio) {
- case 6: /* set fast-devsel off */
- case 7: /* set fast-devsel on */
- b = get_cmd640_reg(CNTRL) & ~0x27;
- if (pio & 1)
- b |= 0x27;
- put_cmd640_reg(CNTRL, b);
- printk("%s: %sabled cmd640 fast host timing (devsel)\n",
- drive->name, (pio & 1) ? "en" : "dis");
- return;
- case 8: /* set prefetch off */
- case 9: /* set prefetch on */
- set_prefetch_mode(drive, index, pio & 1);
- printk("%s: %sabled cmd640 prefetch\n",
- drive->name, (pio & 1) ? "en" : "dis");
- return;
- }
-
- cycle_time = ide_pio_cycle_time(drive, pio);
- cmd640_set_mode(drive, index, pio, cycle_time);
-
- printk("%s: selected cmd640 PIO mode%d (%dns)",
- drive->name, pio, cycle_time);
-
- display_clocks(index);
-}
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
-static void __init cmd640_init_dev(ide_drive_t *drive)
-{
- unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1);
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- /*
- * Reset timing to the slowest speed and turn off prefetch.
- * This way, the drive identify code has a better chance.
- */
- setup_counts[i] = 4; /* max possible */
- active_counts[i] = 16; /* max possible */
- recovery_counts[i] = 16; /* max possible */
- program_drive_counts(drive, i);
- set_prefetch_mode(drive, i, 0);
- printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch cleared\n", i);
-#else
- /*
- * Set the drive unmask flags to match the prefetch setting.
- */
- check_prefetch(drive, i);
- printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch(%s) preserved\n",
- i, (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT) ? "off" : "on");
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-}
-
-static int cmd640_test_irq(ide_hwif_t *hwif)
-{
- int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
- CFR_IDE01INTR;
- u8 irq_stat = get_cmd640_reg(irq_reg);
-
- return (irq_stat & irq_mask) ? 1 : 0;
-}
-
-static const struct ide_port_ops cmd640_port_ops = {
- .init_dev = cmd640_init_dev,
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- .set_pio_mode = cmd640_set_pio_mode,
-#endif
- .test_irq = cmd640_test_irq,
-};
-
-static int pci_conf1(void)
-{
- unsigned long flags;
- u32 tmp;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- outb(0x01, 0xCFB);
- tmp = inl(0xCF8);
- outl(0x80000000, 0xCF8);
- if (inl(0xCF8) == 0x80000000) {
- outl(tmp, 0xCF8);
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 1;
- }
- outl(tmp, 0xCF8);
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 0;
-}
-
-static int pci_conf2(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd640_lock, flags);
- outb(0x00, 0xCFB);
- outb(0x00, 0xCF8);
- outb(0x00, 0xCFA);
- if (inb(0xCF8) == 0x00 && inb(0xCF8) == 0x00) {
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 1;
- }
- spin_unlock_irqrestore(&cmd640_lock, flags);
- return 0;
-}
-
-static const struct ide_port_info cmd640_port_info __initconst = {
- .chipset = ide_cmd640,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_ABUSE_FAST_DEVSEL,
- .port_ops = &cmd640_port_ops,
- .pio_mask = ATA_PIO5,
-};
-
-static int __init cmd640x_init_one(unsigned long base, unsigned long ctl)
-{
- if (!request_region(base, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, base, base + 7);
- return -EBUSY;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(base, 8);
- return -EBUSY;
- }
-
- return 0;
-}
-
-/*
- * Probe for a cmd640 chipset, and initialize it if found.
- */
-static int __init cmd640x_init(void)
-{
- int second_port_cmd640 = 0, rc;
- const char *bus_type, *port2;
- u8 b, cfr;
- struct ide_hw hw[2], *hws[2];
-
- if (cmd640_vlb && probe_for_cmd640_vlb()) {
- bus_type = "VLB";
- } else {
- cmd640_vlb = 0;
- /* Find out what kind of PCI probing is supported otherwise
- Justin Gibbs will sulk.. */
- if (pci_conf1() && probe_for_cmd640_pci1())
- bus_type = "PCI (type1)";
- else if (pci_conf2() && probe_for_cmd640_pci2())
- bus_type = "PCI (type2)";
- else
- return 0;
- }
- /*
- * Undocumented magic (there is no 0x5b reg in specs)
- */
- put_cmd640_reg(0x5b, 0xbd);
- if (get_cmd640_reg(0x5b) != 0xbd) {
- printk(KERN_ERR "ide: cmd640 init failed: wrong value in reg 0x5b\n");
- return 0;
- }
- put_cmd640_reg(0x5b, 0);
-
-#ifdef CMD640_DUMP_REGS
- cmd640_dump_regs();
-#endif
-
- /*
- * Documented magic begins here
- */
- cfr = get_cmd640_reg(CFR);
- cmd640_chip_version = cfr & CFR_DEVREV;
- if (cmd640_chip_version == 0) {
- printk("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
- return 0;
- }
-
- rc = cmd640x_init_one(0x1f0, 0x3f6);
- if (rc)
- return rc;
-
- rc = cmd640x_init_one(0x170, 0x376);
- if (rc) {
- release_region(0x3f6, 1);
- release_region(0x1f0, 8);
- return rc;
- }
-
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
-
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
- printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
- "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
-
- /*
- * Initialize data for primary port
- */
- hws[0] = &hw[0];
-
- /*
- * Ensure compatibility by always using the slowest timings
- * for access to the drive's command register block,
- * and reset the prefetch burstsize to default (512 bytes).
- *
- * Maybe we need a way to NOT do these on *some* systems?
- */
- put_cmd640_reg(CMDTIM, 0);
- put_cmd640_reg(BRST, 0x40);
-
- b = get_cmd640_reg(CNTRL);
-
- /*
- * Try to enable the secondary interface, if not already enabled
- */
- if (secondary_port_responding()) {
- if ((b & CNTRL_ENA_2ND)) {
- second_port_cmd640 = 1;
- port2 = "okay";
- } else if (cmd640_vlb) {
- second_port_cmd640 = 1;
- port2 = "alive";
- } else
- port2 = "not cmd640";
- } else {
- put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */
- if (secondary_port_responding()) {
- second_port_cmd640 = 1;
- port2 = "enabled";
- } else {
- put_cmd640_reg(CNTRL, b); /* restore original setting */
- port2 = "not responding";
- }
- }
-
- /*
- * Initialize data for secondary cmd640 port, if enabled
- */
- if (second_port_cmd640)
- hws[1] = &hw[1];
-
- printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
- second_port_cmd640 ? "" : "not ", port2);
-
-#ifdef CMD640_DUMP_REGS
- cmd640_dump_regs();
-#endif
-
- return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1,
- NULL);
-}
-
-module_param_named(probe_vlb, cmd640_vlb, bool, 0);
-MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset");
-
-module_init(cmd640x_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
deleted file mode 100644
index 943bf944bf72..000000000000
--- a/drivers/ide/cmd64x.c
+++ /dev/null
@@ -1,452 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
- * Due to massive hardware bugs, UltraDMA is only supported
- * on the 646U2 and not on the 646U.
- *
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- *
- * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- * Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cmd64x"
-
-/*
- * CMD64x specific registers definition.
- */
-#define CFR 0x50
-#define CFR_INTR_CH0 0x04
-
-#define CMDTIM 0x52
-#define ARTTIM0 0x53
-#define DRWTIM0 0x54
-#define ARTTIM1 0x55
-#define DRWTIM1 0x56
-#define ARTTIM23 0x57
-#define ARTTIM23_DIS_RA2 0x04
-#define ARTTIM23_DIS_RA3 0x08
-#define ARTTIM23_INTR_CH1 0x10
-#define DRWTIM2 0x58
-#define BRST 0x59
-#define DRWTIM3 0x5b
-
-#define BMIDECR0 0x70
-#define MRDMODE 0x71
-#define MRDMODE_INTR_CH0 0x04
-#define MRDMODE_INTR_CH1 0x08
-#define UDIDETCR0 0x73
-#define DTPR0 0x74
-#define BMIDECR1 0x78
-#define BMIDECSR 0x79
-#define UDIDETCR1 0x7B
-#define DTPR1 0x7C
-
-static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- const unsigned long T = 1000000 / bus_speed;
- static const u8 recovery_values[] =
- {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
- static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
- static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
- static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
- struct ide_timing t;
- u8 arttim = 0;
-
- if (drive->dn >= ARRAY_SIZE(drwtim_regs))
- return;
-
- ide_timing_compute(drive, mode, &t, T, 0);
-
- /*
-	 * If the recovery phase came out too long, try to lengthen
-	 * the active phase instead
- */
- if (t.recover > 16) {
- t.active += t.recover - 16;
- t.recover = 16;
- }
- if (t.active > 16) /* shouldn't actually happen... */
- t.active = 16;
-
- /*
- * Convert values to internal chipset representation
- */
- t.recover = recovery_values[t.recover];
- t.active &= 0x0f;
-
- /* Program the active/recovery counts into the DRWTIM register */
- pci_write_config_byte(dev, drwtim_regs[drive->dn],
- (t.active << 4) | t.recover);
-
- /*
- * The primary channel has individual address setup timing registers
- * for each drive and the hardware selects the slowest timing itself.
- * The secondary channel has one common register and we have to select
- * the slowest address setup timing ourselves.
- */
- if (hwif->channel) {
- ide_drive_t *pair = ide_get_pair_dev(drive);
-
- if (pair) {
- struct ide_timing tp;
-
- ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
- ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
- if (pair->dma_mode) {
- ide_timing_compute(pair, pair->dma_mode,
- &tp, T, 0);
- ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
- }
- }
- }
-
- if (t.setup > 5) /* shouldn't actually happen... */
- t.setup = 5;
-
- /*
- * Program the address setup clocks into the ARTTIM registers.
- * Avoid clearing the secondary channel's interrupt bit.
- */
- (void) pci_read_config_byte (dev, arttim_regs[drive->dn], &arttim);
- if (hwif->channel)
- arttim &= ~ARTTIM23_INTR_CH1;
- arttim &= ~0xc0;
- arttim |= setup_values[t.setup];
- (void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
-}
-
-/*
- * Attempts to set drive's PIO mode.
- * Special cases are 8: prefetch off, 9: prefetch on (both never worked)
- */
-
-static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * Filter out the prefetch control values
- * to prevent PIO5 from being programmed
- */
- if (pio == 8 || pio == 9)
- return;
-
- cmd64x_program_timings(drive, XFER_PIO_0 + pio);
-}
-
-static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 unit = drive->dn & 0x01;
- u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_byte(dev, pciU, &regU);
- regU &= ~(unit ? 0xCA : 0x35);
-
- switch(speed) {
- case XFER_UDMA_5:
- regU |= unit ? 0x0A : 0x05;
- break;
- case XFER_UDMA_4:
- regU |= unit ? 0x4A : 0x15;
- break;
- case XFER_UDMA_3:
- regU |= unit ? 0x8A : 0x25;
- break;
- case XFER_UDMA_2:
- regU |= unit ? 0x42 : 0x11;
- break;
- case XFER_UDMA_1:
- regU |= unit ? 0x82 : 0x21;
- break;
- case XFER_UDMA_0:
- regU |= unit ? 0xC2 : 0x31;
- break;
- case XFER_MW_DMA_2:
- case XFER_MW_DMA_1:
- case XFER_MW_DMA_0:
- cmd64x_program_timings(drive, speed);
- break;
- }
-
- pci_write_config_byte(dev, pciU, regU);
-}
-
-static void cmd648_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = pci_resource_start(dev, 4);
- u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
- MRDMODE_INTR_CH0;
- u8 mrdmode = inb(base + 1);
-
- /* clear the interrupt bit */
- outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
- base + 1);
-}
-
-static void cmd64x_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
- CFR_INTR_CH0;
- u8 irq_stat = 0;
-
- (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
- /* clear the interrupt bit */
- (void) pci_write_config_byte(dev, irq_reg, irq_stat | irq_mask);
-}
-
-static int cmd648_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = pci_resource_start(dev, 4);
- u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
- MRDMODE_INTR_CH0;
- u8 mrdmode = inb(base + 1);
-
- pr_debug("%s: mrdmode: 0x%02x irq_mask: 0x%02x\n",
- hwif->name, mrdmode, irq_mask);
-
- return (mrdmode & irq_mask) ? 1 : 0;
-}
-
-static int cmd64x_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
- CFR_INTR_CH0;
- u8 irq_stat = 0;
-
- (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
-
- pr_debug("%s: irq_stat: 0x%02x irq_mask: 0x%02x\n",
- hwif->name, irq_stat, irq_mask);
-
- return (irq_stat & irq_mask) ? 1 : 0;
-}
-
-/*
- * ASUS P55T2P4D with CMD646 chipset revision 0x01 requires the old
- * event order for DMA transfers.
- */
-
-static int cmd646_1_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = 0, dma_cmd = 0;
-
- /* get DMA status */
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- /* read DMA command state */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- /* stop DMA */
- outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- /* clear the INTR & ERROR bits */
- outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
- /* verify good DMA status */
- return (dma_stat & 7) != 4;
-}
-
-static int init_chipset_cmd64x(struct pci_dev *dev)
-{
- u8 mrdmode = 0;
-
- /* Set a good latency timer and cache line size value. */
- (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
- /* FIXME: pci_set_master() to ensure a good latency timer value */
-
- /*
- * Enable interrupts, select MEMORY READ LINE for reads.
- *
- * NOTE: although not mentioned in the PCI0646U specs,
- * bits 0-1 are write only and won't be read back as
- * set or not -- PCI0646U2 specs clarify this point.
- */
- (void) pci_read_config_byte (dev, MRDMODE, &mrdmode);
- mrdmode &= ~0x30;
- (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
-
- return 0;
-}
-
-static u8 cmd64x_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
-
- switch (dev->device) {
- case PCI_DEVICE_ID_CMD_648:
- case PCI_DEVICE_ID_CMD_649:
- pci_read_config_byte(dev, BMIDECSR, &bmidecsr);
- return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- default:
- return ATA_CBL_PATA40;
- }
-}
-
-static const struct ide_port_ops cmd64x_port_ops = {
- .set_pio_mode = cmd64x_set_pio_mode,
- .set_dma_mode = cmd64x_set_dma_mode,
- .clear_irq = cmd64x_clear_irq,
- .test_irq = cmd64x_test_irq,
- .cable_detect = cmd64x_cable_detect,
-};
-
-static const struct ide_port_ops cmd648_port_ops = {
- .set_pio_mode = cmd64x_set_pio_mode,
- .set_dma_mode = cmd64x_set_dma_mode,
- .clear_irq = cmd648_clear_irq,
- .test_irq = cmd648_test_irq,
- .cable_detect = cmd64x_cable_detect,
-};
-
-static const struct ide_dma_ops cmd646_rev1_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = cmd646_1_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info cmd64x_chipsets[] = {
- { /* 0: CMD643 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
- .port_ops = &cmd64x_port_ops,
- .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
- IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_SERIALIZE,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = 0x00, /* no udma */
- },
- { /* 1: CMD646 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_SERIALIZE,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
- },
- { /* 2: CMD648 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
- },
- { /* 3: CMD649 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
- .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- }
-};
-
-static int cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d;
- u8 idx = id->driver_data;
-
- d = cmd64x_chipsets[idx];
-
- if (idx == 1) {
- /*
- * UltraDMA only supported on PCI646U and PCI646U2, which
- * correspond to revisions 0x03, 0x05 and 0x07 respectively.
- * Actually, although the CMD tech support people won't
- * tell me the details, the 0x03 revision cannot support
- * UDMA correctly without hardware modifications, and even
- * then it only works with Quantum disks due to some
- * hold time assumptions in the 646U part which are fixed
- * in the 646U2.
- *
- * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
- */
- if (dev->revision < 5) {
- d.udma_mask = 0x00;
- /*
- * The original PCI0646 didn't have the primary
- * channel enable bit, it appeared starting with
- * PCI0646U (i.e. revision ID 3).
- */
- if (dev->revision < 3) {
- d.enablebits[0].reg = 0;
- d.port_ops = &cmd64x_port_ops;
- if (dev->revision == 1)
- d.dma_ops = &cmd646_rev1_dma_ops;
- }
- }
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id cmd64x_pci_tbl[] = {
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 2 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 3 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cmd64x_pci_tbl);
-
-static struct pci_driver cmd64x_pci_driver = {
- .name = "CMD64x_IDE",
- .id_table = cmd64x_pci_tbl,
- .probe = cmd64x_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cmd64x_ide_init(void)
-{
- return ide_pci_register_driver(&cmd64x_pci_driver);
-}
-
-static void __exit cmd64x_ide_exit(void)
-{
- pci_unregister_driver(&cmd64x_pci_driver);
-}
-
-module_init(cmd64x_ide_init);
-module_exit(cmd64x_ide_exit);
-
-MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
deleted file mode 100644
index 89a4ff100b7a..000000000000
--- a/drivers/ide/cs5520.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * IDE tuning and bus mastering support for the CS5510/CS5520
- * chipsets
- *
- * The CS5510/CS5520 are slightly unusual devices. Unlike the
- * typical IDE controllers they do bus mastering with the drive in
- * PIO mode and smarter silicon.
- *
- * The practical upshot of this is that we must always tune the
- * drive for the right PIO mode. We must also ignore all the blacklists
- * and the drive bus mastering DMA information.
- *
- * *** This driver is strictly experimental ***
- *
- * (c) Copyright Red Hat Inc 2002
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/dma-mapping.h>
-
-#define DRV_NAME "cs5520"
-
-struct pio_clocks
-{
- int address;
- int assert;
- int recovery;
-};
-
-static struct pio_clocks cs5520_pio_clocks[]={
- {3, 6, 11},
- {2, 5, 6},
- {1, 4, 3},
- {1, 3, 2},
- {1, 2, 1}
-};
-
-static void cs5520_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- int controller = drive->dn > 1 ? 1 : 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /* 8bit CAT/CRT - 8bit command timing for channel */
- pci_write_config_byte(pdev, 0x62 + controller,
- (cs5520_pio_clocks[pio].recovery << 4) |
- (cs5520_pio_clocks[pio].assert));
-
- /* 0x64 - 16bit Primary, 0x68 - 16bit Secondary */
-
- /* FIXME: should these use address ? */
- /* Data read timing */
- pci_write_config_byte(pdev, 0x64 + 4*controller + (drive->dn&1),
- (cs5520_pio_clocks[pio].recovery << 4) |
- (cs5520_pio_clocks[pio].assert));
- /* Write command timing */
- pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1),
- (cs5520_pio_clocks[pio].recovery << 4) |
- (cs5520_pio_clocks[pio].assert));
-}
-
-static void cs5520_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- printk(KERN_ERR "cs55x0: bad ide timing.\n");
-
- drive->pio_mode = XFER_PIO_0 + 0;
- cs5520_set_pio_mode(hwif, drive);
-}
-
-static const struct ide_port_ops cs5520_port_ops = {
- .set_pio_mode = cs5520_set_pio_mode,
- .set_dma_mode = cs5520_set_dma_mode,
-};
-
-static const struct ide_port_info cyrix_chipset = {
- .name = DRV_NAME,
- .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
- .port_ops = &cs5520_port_ops,
- .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_CS5520,
- .pio_mask = ATA_PIO4,
-};
-
-/*
- * The 5510/5520 are a bit weird. They don't quite set up the way
- * the PCI helper layer expects so we must do much of the set up
- * work longhand.
- */
-
-static int cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct ide_port_info *d = &cyrix_chipset;
- struct ide_hw hw[2], *hws[] = { NULL, NULL };
-
- ide_setup_pci_noise(dev, d);
-
- /* We must not grab the entire device, it has 'ISA' space in its
- * BARS too and we will freak out other bits of the kernel
- */
- if (pci_enable_device_io(dev)) {
- printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name);
- return -ENODEV;
- }
- pci_set_master(dev);
- if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "%s: No suitable DMA available.\n",
- d->name);
- return -ENODEV;
- }
-
- /*
- * Now the chipset is configured we can let the core
- * do all the device setup for us
- */
-
- ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
- hw[0].irq = 14;
- hw[1].irq = 15;
-
- return ide_host_add(d, hws, 2, NULL);
-}
-
-static const struct pci_device_id cs5520_pci_tbl[] = {
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), 0 },
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cs5520_pci_tbl);
-
-static struct pci_driver cs5520_pci_driver = {
- .name = "Cyrix_IDE",
- .id_table = cs5520_pci_tbl,
- .probe = cs5520_init_one,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cs5520_ide_init(void)
-{
- return ide_pci_register_driver(&cs5520_pci_driver);
-}
-
-module_init(cs5520_ide_init);
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for Cyrix 5510/5520 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
deleted file mode 100644
index 65371599b976..000000000000
--- a/drivers/ide/cs5530.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2000 Mark Lord <mlord@pobox.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Development of this chipset driver was funded
- * by the nice folks at National Semiconductor.
- *
- * Documentation:
- * CS5530 documentation available from National Semiconductor.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cs5530"
-
-/*
- * Here are the standard PIO mode 0-4 timings for each "format".
- * Format-0 uses fast data reg timings, with slower command reg timings.
- * Format-1 uses fast timings for all registers, but won't work with all drives.
- */
-static unsigned int cs5530_pio_timings[2][5] = {
- {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
-};
-
-/*
- * After chip reset, the PIO timings are set to 0x0000e132, which is not valid.
- */
-#define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132)
-#define CS5530_BASEREG(hwif) (((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20))
-
-/**
- * cs5530_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Handles setting of PIO mode for the chipset.
- *
- * The init_hwif_cs5530() routine guarantees that all drives
- * will have valid default PIO timings set up before we get here.
- */
-
-static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long basereg = CS5530_BASEREG(hwif);
- unsigned int format = (inl(basereg + 4) >> 31) & 1;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
-}
-
-/**
- * cs5530_udma_filter - UDMA filter
- * @drive: drive
- *
- * cs5530_udma_filter() does UDMA mask filtering for the given drive
- * taking into the consideration capabilities of the mate device.
- *
- * The CS5530 specifies that two drives sharing a cable cannot mix
- * UDMA/MDMA. It has to be one or the other, for the pair, though
- * different timings can still be chosen for each drive. We could
- * set the appropriate timing bits on the fly, but that might be
- * a bit confusing. So, for now we statically handle this requirement
- * by looking at our mate drive to see what it is capable of, before
- * choosing a mode for our own drive.
- *
- * Note: This relies on the fact we never fail from UDMA to MWDMA2
- * but instead drop to PIO.
- */
-
-static u8 cs5530_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_drive_t *mate = ide_get_pair_dev(drive);
- u16 *mateid;
- u8 mask = hwif->ultra_mask;
-
- if (mate == NULL)
- goto out;
- mateid = mate->id;
-
- if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) {
- if ((mateid[ATA_ID_FIELD_VALID] & 4) &&
- (mateid[ATA_ID_UDMA_MODES] & 7))
- goto out;
- if (mateid[ATA_ID_MWDMA_MODES] & 7)
- mask = 0;
- }
-out:
- return mask;
-}
-
-static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long basereg;
- unsigned int reg, timings = 0;
-
- switch (drive->dma_mode) {
- case XFER_UDMA_0: timings = 0x00921250; break;
- case XFER_UDMA_1: timings = 0x00911140; break;
- case XFER_UDMA_2: timings = 0x00911030; break;
- case XFER_MW_DMA_0: timings = 0x00077771; break;
- case XFER_MW_DMA_1: timings = 0x00012121; break;
- case XFER_MW_DMA_2: timings = 0x00002020; break;
- }
- basereg = CS5530_BASEREG(hwif);
- reg = inl(basereg + 4); /* get drive0 config register */
- timings |= reg & 0x80000000; /* preserve PIO format bit */
- if ((drive-> dn & 1) == 0) { /* are we configuring drive0? */
- outl(timings, basereg + 4); /* write drive0 config register */
- } else {
- if (timings & 0x00100000)
- reg |= 0x00100000; /* enable UDMA timings for both drives */
- else
- reg &= ~0x00100000; /* disable UDMA timings for both drives */
- outl(reg, basereg + 4); /* write drive0 config register */
- outl(timings, basereg + 12); /* write drive1 config register */
- }
-}
-
-/**
- * init_chipset_5530 - set up 5530 bridge
- * @dev: PCI device
- *
- * Initialize the cs5530 bridge for reliable IDE DMA operation.
- */
-
-static int init_chipset_cs5530(struct pci_dev *dev)
-{
- struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
-
- if (pci_resource_start(dev, 4) == 0)
- return -EFAULT;
-
- dev = NULL;
- while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
- switch (dev->device) {
- case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
- master_0 = pci_dev_get(dev);
- break;
- case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
- cs5530_0 = pci_dev_get(dev);
- break;
- }
- }
- if (!master_0) {
- printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
- goto out;
- }
- if (!cs5530_0) {
- printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
- goto out;
- }
-
- /*
- * Enable BusMaster and MemoryWriteAndInvalidate for the cs5530:
- * --> OR 0x14 into 16-bit PCI COMMAND reg of function 0 of the cs5530
- */
-
- pci_set_master(cs5530_0);
- pci_try_set_mwi(cs5530_0);
-
- /*
- * Set PCI CacheLineSize to 16-bytes:
- * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
- */
-
- pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
-
- /*
- * Disable trapping of UDMA register accesses (Win98 hack):
- * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
- */
-
- pci_write_config_word(cs5530_0, 0xd0, 0x5006);
-
- /*
- * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
- * The other settings are what is necessary to get the register
- * into a sane state for IDE DMA operation.
- */
-
- pci_write_config_byte(master_0, 0x40, 0x1e);
-
- /*
- * Set max PCI burst size (16-bytes seems to work best):
- * 16bytes: set bit-1 at 0x41 (reg value of 0x16)
- * all others: clear bit-1 at 0x41, and do:
- * 128bytes: OR 0x00 at 0x41
- * 256bytes: OR 0x04 at 0x41
- * 512bytes: OR 0x08 at 0x41
- * 1024bytes: OR 0x0c at 0x41
- */
-
- pci_write_config_byte(master_0, 0x41, 0x14);
-
- /*
- * These settings are necessary to get the chip
- * into a sane state for IDE DMA operation.
- */
-
- pci_write_config_byte(master_0, 0x42, 0x00);
- pci_write_config_byte(master_0, 0x43, 0xc1);
-
-out:
- pci_dev_put(master_0);
- pci_dev_put(cs5530_0);
- return 0;
-}
-
-/**
- * init_hwif_cs5530 - initialise an IDE channel
- * @hwif: IDE to initialize
- *
- * This gets invoked by the IDE driver once for each channel. It
- * performs channel-specific pre-initialization before drive probing.
- */
-
-static void init_hwif_cs5530 (ide_hwif_t *hwif)
-{
- unsigned long basereg;
- u32 d0_timings;
-
- basereg = CS5530_BASEREG(hwif);
- d0_timings = inl(basereg + 0);
- if (CS5530_BAD_PIO(d0_timings))
- outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
- if (CS5530_BAD_PIO(inl(basereg + 8)))
- outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
-}
-
-static const struct ide_port_ops cs5530_port_ops = {
- .set_pio_mode = cs5530_set_pio_mode,
- .set_dma_mode = cs5530_set_dma_mode,
- .udma_filter = cs5530_udma_filter,
-};
-
-static const struct ide_port_info cs5530_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_cs5530,
- .init_hwif = init_hwif_cs5530,
- .port_ops = &cs5530_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_POST_SET_MODE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
-};
-
-static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &cs5530_chipset, NULL);
-}
-
-static const struct pci_device_id cs5530_pci_tbl[] = {
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cs5530_pci_tbl);
-
-static struct pci_driver cs5530_pci_driver = {
- .name = "CS5530 IDE",
- .id_table = cs5530_pci_tbl,
- .probe = cs5530_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cs5530_ide_init(void)
-{
- return ide_pci_register_driver(&cs5530_pci_driver);
-}
-
-static void __exit cs5530_ide_exit(void)
-{
- pci_unregister_driver(&cs5530_pci_driver);
-}
-
-module_init(cs5530_ide_init);
-module_exit(cs5530_ide_exit);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for Cyrix/NS 5530 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
deleted file mode 100644
index 70fdbe3161f8..000000000000
--- a/drivers/ide/cs5535.c
+++ /dev/null
@@ -1,216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2004-2005 Advanced Micro Devices, Inc.
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * History:
- * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com>
- * - Reworked tuneproc, set_drive, misc mods to prep for mainline
- * - Work was sponsored by CIS (M) Sdn Bhd.
- * Ported to Kernel 2.6.11 on June 26, 2005 by
- * Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
- * Alexander Kiausch <alex.kiausch@t-online.de>
- * Originally developed by AMD for 2.4/2.6
- *
- * Development of this chipset driver was funded
- * by the nice folks at National Semiconductor/AMD.
- *
- * Documentation:
- * CS5535 documentation available from AMD
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "cs5535"
-
-#define MSR_ATAC_BASE 0x51300000
-#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
-#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
-#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
-#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
-#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
-#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
-#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
-#define ATAC_RESET (MSR_ATAC_BASE+0x10)
-#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
-#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
-#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
-#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
-#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
-#define ATAC_BM0_CMD_PRIM 0x00
-#define ATAC_BM0_STS_PRIM 0x02
-#define ATAC_BM0_PRD 0x04
-#define CS5535_CABLE_DETECT 0x48
-
-/* Format I PIO settings. We separate out cmd and data for safer timings */
-
-static unsigned int cs5535_pio_cmd_timings[5] =
-{ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 };
-static unsigned int cs5535_pio_dta_timings[5] =
-{ 0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131 };
-
-static unsigned int cs5535_mwdma_timings[3] =
-{ 0x7F0FFFF3, 0x7F035352, 0x7f024241 };
-
-static unsigned int cs5535_udma_timings[5] =
-{ 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061 };
-
-/* Macros to check if the register is the reset value - reset value is an
- invalid timing and indicates the register has not been set previously */
-
-#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL) == 0x00009172 )
-#define CS5535_BAD_DMA(timings) ( (timings & 0x000FFFFF) == 0x00077771 )
-
-/****
- * cs5535_set_speed - Configure the chipset to the new speed
- * @drive: Drive to set up
- * @speed: desired speed
- *
- * cs5535_set_speed() configures the chipset to a new speed.
- */
-static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
-{
- u32 reg = 0, dummy;
- u8 unit = drive->dn & 1;
-
- /* Set the PIO timings */
- if (speed < XFER_SW_DMA_0) {
- ide_drive_t *pair = ide_get_pair_dev(drive);
- u8 cmd, pioa;
-
- cmd = pioa = speed - XFER_PIO_0;
-
- if (pair) {
- u8 piob = pair->pio_mode - XFER_PIO_0;
-
- if (piob < cmd)
- cmd = piob;
- }
-
- /* Write the speed of the current drive */
- reg = (cs5535_pio_cmd_timings[cmd] << 16) |
- cs5535_pio_dta_timings[pioa];
- wrmsr(unit ? ATAC_CH0D1_PIO : ATAC_CH0D0_PIO, reg, 0);
-
-		/* And if necessary - change the speed of the other drive */
- rdmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, dummy);
-
- if (((reg >> 16) & cs5535_pio_cmd_timings[cmd]) !=
- cs5535_pio_cmd_timings[cmd]) {
- reg &= 0x0000FFFF;
- reg |= cs5535_pio_cmd_timings[cmd] << 16;
- wrmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, 0);
- }
-
- /* Set bit 31 of the DMA register for PIO format 1 timings */
- rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
- wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA,
- reg | 0x80000000UL, 0);
- } else {
- rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
-
- reg &= 0x80000000UL; /* Preserve the PIO format bit */
-
- if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_4)
- reg |= cs5535_udma_timings[speed - XFER_UDMA_0];
- else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0];
- else
- return;
-
- wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, 0);
- }
-}
-
-/**
- * cs5535_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Programs the chipset for DMA mode.
- */
-
-static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- cs5535_set_speed(drive, drive->dma_mode);
-}
-
-/**
- * cs5535_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * A callback from the upper layers for PIO-only tuning.
- */
-
-static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- cs5535_set_speed(drive, drive->pio_mode);
-}
-
-static u8 cs5535_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 bit;
-
- /* if a 80 wire cable was detected */
- pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
-
- return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops cs5535_port_ops = {
- .set_pio_mode = cs5535_set_pio_mode,
- .set_dma_mode = cs5535_set_dma_mode,
- .cable_detect = cs5535_cable_detect,
-};
-
-static const struct ide_port_info cs5535_chipset = {
- .name = DRV_NAME,
- .port_ops = &cs5535_port_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
-};
-
-static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &cs5535_chipset, NULL);
-}
-
-static const struct pci_device_id cs5535_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), 0 },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, cs5535_pci_tbl);
-
-static struct pci_driver cs5535_pci_driver = {
- .name = "CS5535_IDE",
- .id_table = cs5535_pci_tbl,
- .probe = cs5535_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cs5535_ide_init(void)
-{
- return ide_pci_register_driver(&cs5535_pci_driver);
-}
-
-static void __exit cs5535_ide_exit(void)
-{
- pci_unregister_driver(&cs5535_pci_driver);
-}
-
-module_init(cs5535_ide_init);
-module_exit(cs5535_ide_exit);
-
-MODULE_AUTHOR("AMD");
-MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
deleted file mode 100644
index 8b5ca145191b..000000000000
--- a/drivers/ide/cs5536.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CS5536 PATA support
- * (C) 2007 Martin K. Petersen <mkp@mkp.net>
- * (C) 2009 Bartlomiej Zolnierkiewicz
- *
- * Documentation:
- * Available from AMD web site.
- *
- * The IDE timing registers for the CS5536 live in the Geode Machine
- * Specific Register file and not PCI config space. Most BIOSes
- * virtualize the PCI registers so the chip looks like a standard IDE
- * controller. Unfortunately not all implementations get this right.
- * In particular some have problems with unaligned accesses to the
- * virtualized PCI registers. This driver always does full dword
- * writes to work around the issue. Also, in case of a bad BIOS this
- * driver can be loaded with the "msr=1" parameter which forces using
- * the Machine Specific Registers to configure the device.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <asm/msr.h>
-
-#define DRV_NAME "cs5536"
-
-enum {
- MSR_IDE_CFG = 0x51300010,
- PCI_IDE_CFG = 0x40,
-
- CFG = 0,
- DTC = 2,
- CAST = 3,
- ETC = 4,
-
- IDE_CFG_CHANEN = (1 << 1),
- IDE_CFG_CABLE = (1 << 17) | (1 << 16),
-
- IDE_D0_SHIFT = 24,
- IDE_D1_SHIFT = 16,
- IDE_DRV_MASK = 0xff,
-
- IDE_CAST_D0_SHIFT = 6,
- IDE_CAST_D1_SHIFT = 4,
- IDE_CAST_DRV_MASK = 0x3,
-
- IDE_CAST_CMD_SHIFT = 24,
- IDE_CAST_CMD_MASK = 0xff,
-
- IDE_ETC_UDMA_MASK = 0xc0,
-};
-
-static int use_msr;
-
-static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
-{
- if (unlikely(use_msr)) {
- u32 dummy;
-
- rdmsr(MSR_IDE_CFG + reg, *val, dummy);
- return 0;
- }
-
- return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
-}
-
-static int cs5536_write(struct pci_dev *pdev, int reg, int val)
-{
- if (unlikely(use_msr)) {
- wrmsr(MSR_IDE_CFG + reg, val, 0);
- return 0;
- }
-
- return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
-}
-
-static void cs5536_program_dtc(ide_drive_t *drive, u8 tim)
-{
- struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
- int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- u32 dtc;
-
- cs5536_read(pdev, DTC, &dtc);
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= tim << dshift;
- cs5536_write(pdev, DTC, dtc);
-}
-
-/**
- * cs5536_cable_detect - detect cable type
- * @hwif: Port to detect on
- *
- * Perform cable detection for ATA66 capable cable.
- *
- * Returns a cable type.
- */
-
-static u8 cs5536_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u32 cfg;
-
- cs5536_read(pdev, CFG, &cfg);
-
- if (cfg & IDE_CFG_CABLE)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-/**
- * cs5536_set_pio_mode - PIO timing setup
- * @hwif: ATA port
- * @drive: ATA device
- */
-
-static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 drv_timings[5] = {
- 0x98, 0x55, 0x32, 0x21, 0x20,
- };
-
- static const u8 addr_timings[5] = {
- 0x2, 0x1, 0x0, 0x0, 0x0,
- };
-
- static const u8 cmd_timings[5] = {
- 0x99, 0x92, 0x90, 0x22, 0x20,
- };
-
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- u32 cast;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 cmd_pio = pio;
-
- if (pair)
- cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);
-
- timings &= (IDE_DRV_MASK << 8);
- timings |= drv_timings[pio];
- ide_set_drivedata(drive, (void *)timings);
-
- cs5536_program_dtc(drive, drv_timings[pio]);
-
- cs5536_read(pdev, CAST, &cast);
-
- cast &= ~(IDE_CAST_DRV_MASK << cshift);
- cast |= addr_timings[pio] << cshift;
-
- cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
- cast |= cmd_timings[cmd_pio] << IDE_CAST_CMD_SHIFT;
-
- cs5536_write(pdev, CAST, cast);
-}
-
-/**
- * cs5536_set_dma_mode - DMA timing setup
- * @hwif: ATA port
- * @drive: ATA device
- */
-
-static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 udma_timings[6] = {
- 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
- };
-
- static const u8 mwdma_timings[3] = {
- 0x67, 0x21, 0x20,
- };
-
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- u32 etc;
- const u8 mode = drive->dma_mode;
-
- cs5536_read(pdev, ETC, &etc);
-
- if (mode >= XFER_UDMA_0) {
- etc &= ~(IDE_DRV_MASK << dshift);
- etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
- } else { /* MWDMA */
- etc &= ~(IDE_ETC_UDMA_MASK << dshift);
- timings &= IDE_DRV_MASK;
- timings |= mwdma_timings[mode - XFER_MW_DMA_0] << 8;
- ide_set_drivedata(drive, (void *)timings);
- }
-
- cs5536_write(pdev, ETC, etc);
-}
-
-static void cs5536_dma_start(ide_drive_t *drive)
-{
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
-
- if (drive->current_speed < XFER_UDMA_0 &&
- (timings >> 8) != (timings & IDE_DRV_MASK))
- cs5536_program_dtc(drive, timings >> 8);
-
- ide_dma_start(drive);
-}
-
-static int cs5536_dma_end(ide_drive_t *drive)
-{
- int ret = ide_dma_end(drive);
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
-
- if (drive->current_speed < XFER_UDMA_0 &&
- (timings >> 8) != (timings & IDE_DRV_MASK))
- cs5536_program_dtc(drive, timings & IDE_DRV_MASK);
-
- return ret;
-}
-
-static const struct ide_port_ops cs5536_port_ops = {
- .set_pio_mode = cs5536_set_pio_mode,
- .set_dma_mode = cs5536_set_dma_mode,
- .cable_detect = cs5536_cable_detect,
-};
-
-static const struct ide_dma_ops cs5536_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = cs5536_dma_start,
- .dma_end = cs5536_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info cs5536_info = {
- .name = DRV_NAME,
- .port_ops = &cs5536_port_ops,
- .dma_ops = &cs5536_dma_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
-};
-
-/**
- * cs5536_init_one
- * @dev: PCI device
- * @id: Entry in match table
- */
-
-static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- u32 cfg;
-
- if (use_msr)
- printk(KERN_INFO DRV_NAME ": Using MSR regs instead of PCI\n");
-
- cs5536_read(dev, CFG, &cfg);
-
- if ((cfg & IDE_CFG_CHANEN) == 0) {
- printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
- return -ENODEV;
- }
-
- return ide_pci_init_one(dev, &cs5536_info, NULL);
-}
-
-static const struct pci_device_id cs5536_pci_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
- { },
-};
-
-static struct pci_driver cs5536_pci_driver = {
- .name = DRV_NAME,
- .id_table = cs5536_pci_tbl,
- .probe = cs5536_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-module_pci_driver(cs5536_pci_driver);
-
-MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, cs5536_pci_tbl);
-
-module_param_named(msr, use_msr, int, 0644);
-MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
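
cs5536_read()/cs5536_write() above hide whether a timing register is reached through the virtualized PCI config space (offset PCI_IDE_CFG + reg * 4) or directly through an MSR (MSR_IDE_CFG + reg), selected once by the msr= module parameter. Here is a userspace sketch of the same switchable accessor with both backends replaced by in-memory stand-ins; the real rdmsr()/wrmsr() and pci_read_config_dword()/pci_write_config_dword() calls are only referenced in the comments.

#include <stdio.h>

#define IDE_CFG_BASE	0x40	/* PCI config offset of the first register */
#define NREGS		5	/* CFG, DTC, CAST, ETC as in the driver */

static int use_msr;		/* backend selector, like the msr= parameter */

/* Stand-ins for the two register files. */
static unsigned int fake_msr[NREGS];
static unsigned int fake_pci_cfg[0x60];	/* indexed by byte offset / 4 */

static unsigned int reg_read(int reg)
{
	if (use_msr)
		return fake_msr[reg];	/* rdmsr(MSR_IDE_CFG + reg, ...) */
	/* pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, ...) */
	return fake_pci_cfg[(IDE_CFG_BASE + reg * 4) / 4];
}

static void reg_write(int reg, unsigned int val)
{
	if (use_msr)
		fake_msr[reg] = val;	/* wrmsr(MSR_IDE_CFG + reg, ...) */
	else
		fake_pci_cfg[(IDE_CFG_BASE + reg * 4) / 4] = val;
}

int main(void)
{
	enum { CFG = 0, DTC = 2 };

	use_msr = 0;
	reg_write(DTC, 0x20u << 24);
	printf("PCI backend: DTC = 0x%08x\n", reg_read(DTC));

	use_msr = 1;
	reg_write(DTC, 0x21u << 24);
	printf("MSR backend: DTC = 0x%08x\n", reg_read(DTC));
	return 0;
}
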
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
deleted file mode 100644
index bc01660ee8fd..000000000000
--- a/drivers/ide/cy82c693.c
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
- * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
- * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
- *
- * CYPRESS CY82C693 chipset IDE controller
- *
- * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "cy82c693"
-
-/*
- * NOTE: the value for busmaster timeout is tricky and I got it by
- * trial and error! By using a to low value will cause DMA timeouts
- * and drop IDE performance, and by using a to high value will cause
- * audio playback to scatter.
- * If you know a better value or how to calc it, please let me know.
- */
-
-/* twice the value written in cy82c693ub datasheet */
-#define BUSMASTER_TIMEOUT 0x50
-/*
- * the value above was tested on my machine and it seems to work okay
- */
-
-/* here are the offset definitions for the registers */
-#define CY82_IDE_CMDREG 0x04
-#define CY82_IDE_ADDRSETUP 0x48
-#define CY82_IDE_MASTER_IOR 0x4C
-#define CY82_IDE_MASTER_IOW 0x4D
-#define CY82_IDE_SLAVE_IOR 0x4E
-#define CY82_IDE_SLAVE_IOW 0x4F
-#define CY82_IDE_MASTER_8BIT 0x50
-#define CY82_IDE_SLAVE_8BIT 0x51
-
-#define CY82_INDEX_PORT 0x22
-#define CY82_DATA_PORT 0x23
-
-#define CY82_INDEX_CHANNEL0 0x30
-#define CY82_INDEX_CHANNEL1 0x31
-#define CY82_INDEX_TIMEOUT 0x32
-
-/*
- * set DMA mode a specific channel for CY82C693
- */
-
-static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 mode = drive->dma_mode;
- u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
-
- index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
-
- data = (mode & 3) | (single << 2);
-
- outb(index, CY82_INDEX_PORT);
- outb(data, CY82_DATA_PORT);
-
- /*
- * note: below we set the value for Bus Master IDE TimeOut Register
- * I'm not absolutely sure what this does, but it solved my problem
- * with IDE DMA and sound, so I now can play sound and work with
- * my IDE driver at the same time :-)
- *
- * If you know the correct (best) value for this register please
- * let me know - ASK
- */
-
- data = BUSMASTER_TIMEOUT;
- outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
- outb(data, CY82_DATA_PORT);
-}
-
-static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- const unsigned long T = 1000000 / bus_speed;
- unsigned int addrCtrl;
- struct ide_timing t;
- u8 time_16, time_8;
-
- /* select primary or secondary channel */
- if (drive->dn > 1) { /* drive is on the secondary channel */
- dev = pci_get_slot(dev->bus, dev->devfn+1);
- if (!dev) {
- printk(KERN_ERR "%s: tune_drive: "
- "Cannot find secondary interface!\n",
- drive->name);
- return;
- }
- }
-
- ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
-
- time_16 = clamp_val(t.recover - 1, 0, 15) |
- (clamp_val(t.active - 1, 0, 15) << 4);
- time_8 = clamp_val(t.act8b - 1, 0, 15) |
- (clamp_val(t.rec8b - 1, 0, 15) << 4);
-
- /* now let's write the clocks registers */
- if ((drive->dn & 1) == 0) {
- /*
- * set master drive
- * address setup control register
- * is 32 bit !!!
- */
- pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
-
- addrCtrl &= (~0xF);
- addrCtrl |= clamp_val(t.setup - 1, 0, 15);
- pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
-
- /* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16);
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16);
- pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8);
- } else {
- /*
- * set slave drive
- * address setup control register
- * is 32 bit !!!
- */
- pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
-
- addrCtrl &= (~0xF0);
- addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
- pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
-
- /* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
- }
- if (drive->dn > 1)
- pci_dev_put(dev);
-}
-
-static void init_iops_cy82c693(ide_hwif_t *hwif)
-{
- static ide_hwif_t *primary;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (PCI_FUNC(dev->devfn) == 1)
- primary = hwif;
- else {
- hwif->mate = primary;
- hwif->channel = 1;
- }
-}
-
-static const struct ide_port_ops cy82c693_port_ops = {
- .set_pio_mode = cy82c693_set_pio_mode,
- .set_dma_mode = cy82c693_set_dma_mode,
-};
-
-static const struct ide_port_info cy82c693_chipset = {
- .name = DRV_NAME,
- .init_iops = init_iops_cy82c693,
- .port_ops = &cy82c693_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int cy82c693_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct pci_dev *dev2;
- int ret = -ENODEV;
-
-	/* CY82C693 is more than only an IDE controller.
-	   Function 1 is the primary IDE channel, function 2 the secondary. */
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
- PCI_FUNC(dev->devfn) == 1) {
- dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
- ret = ide_pci_init_two(dev, dev2, &cy82c693_chipset, NULL);
- if (ret)
- pci_dev_put(dev2);
- }
- return ret;
-}
-
-static void cy82c693_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
-
- ide_pci_remove(dev);
- pci_dev_put(dev2);
-}
-
-static const struct pci_device_id cy82c693_pci_tbl[] = {
- { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, cy82c693_pci_tbl);
-
-static struct pci_driver cy82c693_pci_driver = {
- .name = "Cypress_IDE",
- .id_table = cy82c693_pci_tbl,
- .probe = cy82c693_init_one,
- .remove = cy82c693_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init cy82c693_ide_init(void)
-{
- return ide_pci_register_driver(&cy82c693_pci_driver);
-}
-
-static void __exit cy82c693_ide_exit(void)
-{
- pci_unregister_driver(&cy82c693_pci_driver);
-}
-
-module_init(cy82c693_ide_init);
-module_exit(cy82c693_ide_exit);
-
-MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
deleted file mode 100644
index 300daabaa575..000000000000
--- a/drivers/ide/delkin_cb.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Created 20 Oct 2004 by Mark Lord
- *
- * Basic support for Delkin/ASKA/Workbit Cardbus CompactFlash adapter
- *
- * Modeled after the 16-bit PCMCIA driver: ide-cs.c
- *
- * This is slightly peculiar, in that it is a PCI driver,
- * but is NOT an IDE PCI driver -- the IDE layer does not directly
- * support hot insertion/removal of PCI interfaces, so this driver
- * is unable to use the IDE PCI interfaces. Instead, it uses the
- * same interfaces as the ide-cs (PCMCIA) driver uses.
- * On the plus side, the driver is also smaller/simpler this way.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/io.h>
-
-/*
- * No chip documentation has yet been found,
- * so these configuration values were pulled from
- * a running Win98 system using "debug".
- * This gives around 3MByte/second read performance,
- * which is about 2/3 of what the chip is capable of.
- *
- * There is also a 4KByte mmio region on the card,
- * but its purpose has yet to be reverse-engineered.
- */
-static const u8 setup[] = {
- 0x00, 0x05, 0xbe, 0x01, 0x20, 0x8f, 0x00, 0x00,
- 0xa4, 0x1f, 0xb3, 0x1b, 0x00, 0x00, 0x00, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
-};
-
-static const struct ide_port_ops delkin_cb_port_ops = {
- .quirkproc = ide_undecoded_slave,
-};
-
-static int delkin_cb_init_chipset(struct pci_dev *dev)
-{
- unsigned long base = pci_resource_start(dev, 0);
- int i;
-
- outb(0x02, base + 0x1e); /* set nIEN to block interrupts */
- inb(base + 0x17); /* read status to clear interrupts */
-
- for (i = 0; i < sizeof(setup); ++i) {
- if (setup[i])
- outb(setup[i], base + i);
- }
-
- return 0;
-}
-
-static const struct ide_port_info delkin_cb_port_info = {
- .port_ops = &delkin_cb_port_ops,
- .host_flags = IDE_HFLAG_IO_32BIT | IDE_HFLAG_UNMASK_IRQS |
- IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .init_chipset = delkin_cb_init_chipset,
- .chipset = ide_pci,
-};
-
-static int delkin_cb_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_host *host;
- unsigned long base;
- int rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- rc = pci_enable_device(dev);
- if (rc) {
- printk(KERN_ERR "delkin_cb: pci_enable_device failed (%d)\n", rc);
- return rc;
- }
- rc = pci_request_regions(dev, "delkin_cb");
- if (rc) {
- printk(KERN_ERR "delkin_cb: pci_request_regions failed (%d)\n", rc);
- pci_disable_device(dev);
- return rc;
- }
- base = pci_resource_start(dev, 0);
-
- delkin_cb_init_chipset(dev);
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
- hw.irq = dev->irq;
- hw.dev = &dev->dev;
-
- rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host);
- if (rc)
- goto out_disable;
-
- pci_set_drvdata(dev, host);
-
- return 0;
-
-out_disable:
- pci_release_regions(dev);
- pci_disable_device(dev);
- return rc;
-}
-
-static void
-delkin_cb_remove (struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- ide_host_remove(host);
-
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-#ifdef CONFIG_PM
-static int delkin_cb_suspend(struct pci_dev *dev, pm_message_t state)
-{
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
- return 0;
-}
-
-static int delkin_cb_resume(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- int rc;
-
- pci_set_power_state(dev, PCI_D0);
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- pci_restore_state(dev);
- pci_set_master(dev);
-
- if (host->init_chipset)
- host->init_chipset(dev);
-
- return 0;
-}
-#else
-#define delkin_cb_suspend NULL
-#define delkin_cb_resume NULL
-#endif
-
-static struct pci_device_id delkin_cb_pci_tbl[] = {
- { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
-
-static struct pci_driver delkin_cb_pci_driver = {
- .name = "Delkin-ASKA-Workbit Cardbus IDE",
- .id_table = delkin_cb_pci_tbl,
- .probe = delkin_cb_probe,
- .remove = delkin_cb_remove,
- .suspend = delkin_cb_suspend,
- .resume = delkin_cb_resume,
-};
-
-module_pci_driver(delkin_cb_pci_driver);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE");
-MODULE_LICENSE("GPL");
-
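
delkin_cb_probe() above follows the usual probe shape: enable the device, claim its regions, set up the hardware, register the host, and on failure undo exactly the steps already taken. The sketch below shows the general goto-unwind shape of that pattern in a self-contained form; the step functions are stand-ins that can be made to fail, not the PCI or IDE calls themselves.

#include <stdio.h>

static int fail_at;	/* 0 = everything succeeds, n = step n fails */

static int  enable_device(void)   { return fail_at == 1 ? -1 : 0; }
static int  request_regions(void) { return fail_at == 2 ? -1 : 0; }
static int  register_host(void)   { return fail_at == 3 ? -1 : 0; }
static void release_regions(void) { printf("  released regions\n"); }
static void disable_device(void)  { printf("  disabled device\n"); }

/* Probe with goto-based unwinding: each error path undoes only what has
 * already been acquired, in reverse order. */
static int probe(void)
{
	int rc;

	rc = enable_device();
	if (rc)
		return rc;		/* nothing to undo yet */

	rc = request_regions();
	if (rc)
		goto out_disable;

	rc = register_host();
	if (rc)
		goto out_release;

	return 0;

out_release:
	release_regions();
out_disable:
	disable_device();
	return rc;
}

int main(void)
{
	for (fail_at = 0; fail_at <= 3; fail_at++)
		printf("fail_at=%d -> probe() = %d\n", fail_at, probe());
	return 0;
}
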
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
deleted file mode 100644
index 714e8cd0fa49..000000000000
--- a/drivers/ide/dtc2278.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996 Linus Torvalds & author (see below)
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "dtc2278"
-
-/*
- * Changing this #undef to #define may solve start up problems in some systems.
- */
-#undef ALWAYS_SET_DTC2278_PIO_MODE
-
-/*
- * From: andy@cercle.cts.com (Dyan Wile)
- *
- * Below is a patch for DTC-2278 - alike software-programmable controllers
- * The code enables the secondary IDE controller and the PIO4 (3?) timings on
- * the primary (EIDE). You may probably have to enable the 32-bit support to
- * get the full speed. You better get the disk interrupts disabled ( hdparm -u0
- * /dev/hd.. ) for the drives connected to the EIDE interface. (I get my
- * filesystem corrupted with -u1, but under heavy disk load only :-)
- *
- * This card is now forced to use the "serialize" feature,
- * and irq-unmasking is disallowed. If io_32bit is enabled,
- * it must be done for BOTH drives on each interface.
- *
- * This code was written for the DTC2278E, but might work with any of these:
- *
- * DTC2278S has only a single IDE interface.
- * DTC2278D has two IDE interfaces and is otherwise identical to the S version.
- * DTC2278E also has serial ports and a printer port
- * DTC2278EB: has onboard BIOS, and "works like a charm" -- Kent Bradford <kent@theory.caltech.edu>
- *
- * There may be a fourth controller type. The S and D versions use the
- * Winbond chip, and I think the E version does also.
- *
- */
-
-static void sub22 (char b, char c)
-{
- int i;
-
- for(i = 0; i < 3; ++i) {
- inb(0x3f6);
- outb_p(b,0xb0);
- inb(0x3f6);
- outb_p(c,0xb4);
- inb(0x3f6);
- if(inb(0xb4) == c) {
- outb_p(7,0xb0);
- inb(0x3f6);
- return; /* success */
- }
- }
-}
-
-static DEFINE_SPINLOCK(dtc2278_lock);
-
-static void dtc2278_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long flags;
-
- if (drive->pio_mode >= XFER_PIO_3) {
- spin_lock_irqsave(&dtc2278_lock, flags);
- /*
- * This enables PIO mode4 (3?) on the first interface
- */
- sub22(1,0xc3);
- sub22(0,0xa0);
- spin_unlock_irqrestore(&dtc2278_lock, flags);
- } else {
- /* we don't know how to set it back again.. */
- /* Actually we do - there is a data sheet available for the
- Winbond but does anyone actually care */
- }
-}
-
-static const struct ide_port_ops dtc2278_port_ops = {
- .set_pio_mode = dtc2278_set_pio_mode,
-};
-
-static const struct ide_port_info dtc2278_port_info __initconst = {
- .name = DRV_NAME,
- .chipset = ide_dtc2278,
- .port_ops = &dtc2278_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_UNMASK_IRQS |
- IDE_HFLAG_IO_32BIT |
- /* disallow ->io_32bit changes */
- IDE_HFLAG_NO_IO_32BIT |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_DTC2278,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init dtc2278_probe(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- /*
- * This enables the second interface
- */
- outb_p(4,0xb0);
- inb(0x3f6);
- outb_p(0x20,0xb4);
- inb(0x3f6);
-#ifdef ALWAYS_SET_DTC2278_PIO_MODE
- /*
- * This enables PIO mode4 (3?) on the first interface
- * and may solve start-up problems for some people.
- */
- sub22(1,0xc3);
- sub22(0,0xa0);
-#endif
- local_irq_restore(flags);
-
- return ide_legacy_device_add(&dtc2278_port_info, 0);
-}
-
-static bool probe_dtc2278;
-
-module_param_named(probe, probe_dtc2278, bool, 0);
-MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
-
-static int __init dtc2278_init(void)
-{
- if (probe_dtc2278 == 0)
- return -ENODEV;
-
- if (dtc2278_probe()) {
- printk(KERN_ERR "dtc2278: ide interfaces already in use!\n");
- return -EBUSY;
- }
- return 0;
-}
-
-module_init(dtc2278_init);
-
-MODULE_AUTHOR("See Local File");
-MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
deleted file mode 100644
index bb86d84558d9..000000000000
--- a/drivers/ide/falconide.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Atari Falcon IDE Driver
- *
- * Created 12 Jul 1997 by Geert Uytterhoeven
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-
-#include <asm/setup.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_stdma.h>
-#include <asm/ide.h>
-
-#define DRV_NAME "falconide"
-
- /*
- * Offsets from base address
- */
-
-#define ATA_HD_CONTROL 0x39
-
- /*
- * falconide_intr_lock is used to obtain access to the IDE interrupt,
- * which is shared between several drivers.
- */
-
-static int falconide_intr_lock;
-
-static void falconide_release_lock(void)
-{
- if (falconide_intr_lock == 0) {
- printk(KERN_ERR "%s: bug\n", __func__);
- return;
- }
- falconide_intr_lock = 0;
- stdma_release();
-}
-
-static void falconide_get_lock(irq_handler_t handler, void *data)
-{
- if (falconide_intr_lock == 0) {
- stdma_lock(handler, data);
- falconide_intr_lock = 1;
- }
-}
-
-static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_insw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_outsw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-/* Atari has a byte-swapped IDE interface */
-static const struct ide_tp_ops falconide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = falconide_input_data,
- .output_data = falconide_output_data,
-};
-
-static const struct ide_port_info falconide_port_info = {
- .get_lock = falconide_get_lock,
- .release_lock = falconide_release_lock,
- .tp_ops = &falconide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
-static void __init falconide_setup_ports(struct ide_hw *hw, unsigned long base)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- hw->io_ports.data_addr = base;
-
- for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = base + 1 + i * 4;
-
- hw->io_ports.ctl_addr = base + ATA_HD_CONTROL;
-
- hw->irq = IRQ_MFP_IDE;
-}
-
- /*
- * Probe for a Falcon IDE interface
- */
-
-static int __init falconide_init(struct platform_device *pdev)
-{
- struct resource *res;
- struct ide_host *host;
- struct ide_hw hw, *hws[] = { &hw };
- unsigned long base;
- int rc;
-
- dev_info(&pdev->dev, "Atari Falcon IDE controller\n");
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), DRV_NAME)) {
- dev_err(&pdev->dev, "resources busy\n");
- return -EBUSY;
- }
-
- base = (unsigned long)res->start;
-
- falconide_setup_ports(&hw, base);
-
- host = ide_host_alloc(&falconide_port_info, hws, 1);
- if (host == NULL) {
- rc = -ENOMEM;
- goto err;
- }
-
- falconide_get_lock(NULL, NULL);
- rc = ide_host_register(host, &falconide_port_info, hws);
- falconide_release_lock();
-
- if (rc)
- goto err_free;
-
- platform_set_drvdata(pdev, host);
- return 0;
-err_free:
- ide_host_free(host);
-err:
- release_mem_region(res->start, resource_size(res));
- return rc;
-}
-
-static int falconide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
-
- return 0;
-}
-
-static struct platform_driver ide_falcon_driver = {
- .remove = falconide_remove,
- .driver = {
- .name = "atari-falcon-ide",
- },
-};
-
-module_platform_driver_probe(ide_falcon_driver, falconide_init);
-
-MODULE_AUTHOR("Geert Uytterhoeven");
-MODULE_DESCRIPTION("low-level driver for Atari Falcon IDE");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:atari-falcon-ide");
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
deleted file mode 100644
index 901e6ebfeb96..000000000000
--- a/drivers/ide/gayle.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Amiga Gayle IDE Driver
- *
- * Created 9 Jul 1997 by Geert Uytterhoeven
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/zorro.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <asm/setup.h>
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-#include <asm/amigayle.h>
-
-
- /*
- * Offsets from one of the above bases
- */
-
-#define GAYLE_CONTROL 0x101a
-
- /*
- * These are at different offsets from the base
- */
-
-#define GAYLE_IRQ_4000 0xdd3020 /* MSB = 1, Harddisk is source of */
-#define GAYLE_IRQ_1200 0xda9000 /* interrupt */
-
-
- /*
- * Offset of the secondary port for IDE doublers
- * Note that GAYLE_CONTROL is NOT available then!
- */
-
-#define GAYLE_NEXT_PORT 0x1000
-
-#define GAYLE_NUM_HWIFS 2
-#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \
- GAYLE_NUM_HWIFS-1)
-#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
-
-static bool ide_doubler;
-module_param_named(doubler, ide_doubler, bool, 0);
-MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
-
- /*
- * Check and acknowledge the interrupt status
- */
-
-static int gayle_test_irq(ide_hwif_t *hwif)
-{
- unsigned char ch;
-
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & GAYLE_IRQ_IDE))
- return 0;
- return 1;
-}
-
-static void gayle_a1200_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- (void)z_readb(hwif->io_ports.status_addr);
- z_writeb(0x7c, hwif->io_ports.irq_addr);
-}
-
-static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
- unsigned long ctl, unsigned long irq_port)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- hw->io_ports.data_addr = base;
-
- for (i = 1; i < 8; i++)
- hw->io_ports_array[i] = base + 2 + i * 4;
-
- hw->io_ports.ctl_addr = ctl;
- hw->io_ports.irq_addr = irq_port;
-
- hw->irq = IRQ_AMIGA_PORTS;
-}
-
-static const struct ide_port_ops gayle_a4000_port_ops = {
- .test_irq = gayle_test_irq,
-};
-
-static const struct ide_port_ops gayle_a1200_port_ops = {
- .clear_irq = gayle_a1200_clear_irq,
- .test_irq = gayle_test_irq,
-};
-
-static const struct ide_port_info gayle_port_info = {
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
- /*
- * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
- */
-
-static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct gayle_ide_platform_data *pdata;
- unsigned long base, ctrlport, irqport;
- unsigned int i;
- int error;
- struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
- struct ide_port_info d = gayle_port_info;
- struct ide_host *host;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!request_mem_region(res->start, resource_size(res), "IDE"))
- return -EBUSY;
-
- pdata = dev_get_platdata(&pdev->dev);
- pr_info("ide: Gayle IDE controller (A%u style%s)\n",
- pdata->explicit_ack ? 1200 : 4000,
- ide_doubler ? ", IDE doubler" : "");
-
- base = (unsigned long)ZTWO_VADDR(pdata->base);
- ctrlport = 0;
- irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
- if (pdata->explicit_ack)
- d.port_ops = &gayle_a1200_port_ops;
- else
- d.port_ops = &gayle_a4000_port_ops;
-
- for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
- if (GAYLE_HAS_CONTROL_REG)
- ctrlport = base + GAYLE_CONTROL;
-
- gayle_setup_ports(&hw[i], base, ctrlport, irqport);
- hws[i] = &hw[i];
- }
-
- error = ide_host_add(&d, hws, i, &host);
- if (error)
- goto out;
-
- platform_set_drvdata(pdev, host);
- return 0;
-
-out:
- release_mem_region(res->start, resource_size(res));
- return error;
-}
-
-static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- ide_host_remove(host);
- release_mem_region(res->start, resource_size(res));
- return 0;
-}
-
-static struct platform_driver amiga_gayle_ide_driver = {
- .remove = __exit_p(amiga_gayle_ide_remove),
- .driver = {
- .name = "amiga-gayle-ide",
- },
-};
-
-module_platform_driver_probe(amiga_gayle_ide_driver, amiga_gayle_ide_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:amiga-gayle-ide");
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
deleted file mode 100644
index 50c9a41467c8..000000000000
--- a/drivers/ide/hpt366.c
+++ /dev/null
@@ -1,1545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
- * Portions Copyright (C) 2001 Sun Microsystems, Inc.
- * Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
- *
- * Thanks to HighPoint Technologies for their assistance, and hardware.
- * Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
- * donation of an ABit BP6 mainboard, processor, and memory accelerated
- * development and support.
- *
- *
- * HighPoint has its own drivers (open source except for the RAID part)
- * available from http://www.highpoint-tech.com/USA_new/service_support.htm
- * This may be useful to anyone wanting to work on this driver; however, do not
- * trust them too much since the code tends to become less and less meaningful
- * as time passes... :-/
- *
- * Note that final HPT370 support was done by force extraction of GPL.
- *
- * - add function for getting/setting power status of drive
- * - the HPT370's state machine can get confused. reset it before each dma
- * xfer to prevent that from happening.
- * - reset state engine whenever we get an error.
- * - check for busmaster state at end of dma.
- * - use new highpoint timings.
- * - detect bus speed using highpoint register.
- * - use pll if we don't have a clock table. added a 66MHz table that's
- * just 2x the 33MHz table.
- * - removed turnaround. NOTE: we never want to switch between pll and
- * pci clocks as the chip can glitch in those cases. the highpoint
- * approved workaround slows everything down too much to be useful. in
- * addition, we would have to serialize access to each chip.
- * Adrian Sun <a.sun@sun.com>
- *
- * add drive timings for 66MHz PCI bus,
- * fix ATA Cable signal detection, fix incorrect /proc info
- * add /proc display for per-drive PIO/DMA/UDMA mode and
- * per-channel ATA-33/66 Cable detect.
- * Duncan Laurie <void@sun.com>
- *
- * fixup /proc output for multiple controllers
- * Tim Hockin <thockin@sun.com>
- *
- * On hpt366:
- * Reset the hpt366 on error, reset on dma
- * Fix disabling Fast Interrupt hpt366.
- * Mike Waychison <crlf@sun.com>
- *
- * Added support for 372N clocking and clock switching. The 372N needs
- * different clocks on read/write. This requires overloading rw_disk and
- * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
- * keeping me sane.
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * - fix the clock turnaround code: it was writing to the wrong ports when
- * called for the secondary channel, caching the current clock mode per-
- * channel caused the cached register value to get out of sync with the
- * actual one, the channels weren't serialized, the turnaround shouldn't
- * be done on 66 MHz PCI bus
- * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used
- * does not allow for this speed anyway
- * - avoid touching disabled channels (e.g. HPT371/N are single channel chips,
- * their primary channel is kind of virtual, it isn't tied to any pins)
- * - fix/remove bad/unused timing tables and use one set of tables for the whole
- * HPT37x chip family; save space by introducing the separate transfer mode
- * table in which the mode lookup is done
- * - use f_CNT value saved by the HighPoint BIOS as reading it directly gives
- * the wrong PCI frequency since DPLL has already been calibrated by BIOS;
- * read it only from the function 0 of HPT374 chips
- * - fix the hotswap code: it caused RESET- to glitch when tristating the bus,
- * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead
- * - pass to init_chipset() handlers a copy of the IDE PCI device structure as
- * they tamper with its fields
- * - pass to the init_setup handlers a copy of the ide_pci_device_t structure
- * since they may tamper with its fields
- * - prefix the driver startup messages with the real chip name
- * - claim the extra 240 bytes of I/O space for all chips
- * - optimize the UltraDMA filtering and the drive list lookup code
- * - use pci_get_slot() to get to the function 1 of HPT36x/374
- * - cache offset of the channel's misc. control registers (MCRs) being used
- * throughout the driver
- * - only touch the relevant MCR when detecting the cable type on HPT374's
- * function 1
- * - rename all the register related variables consistently
- * - move all the interrupt twiddling code from the speedproc handlers into
- * init_hwif_hpt366(), also grouping all the DMA related code together there
- * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and
- * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings
- * when setting an UltraDMA mode
- * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select
- * the best possible one
- * - clean up DMA timeout handling for HPT370
- * - switch to using the enumeration type to differ between the numerous chip
- * variants, matching PCI device/revision ID with the chip type early, at the
- * init_setup stage
- * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies,
- * stop duplicating it for each channel by storing the pointer in the pci_dev
- * structure: first, at the init_setup stage, point it to a static "template"
- * with only the chip type and its specific base DPLL frequency, the highest
- * UltraDMA mode, and the chip settings table pointer filled, then, at the
- * init_chipset stage, allocate per-chip instance and fill it with the rest
- * of the necessary information
- * - get rid of the constant thresholds in the HPT37x PCI clock detection code,
- * switch to calculating PCI clock frequency based on the chip's base DPLL
- * frequency
- * - switch to using the DPLL clock and enable UltraATA/133 mode by default on
- * anything newer than HPT370/A (except HPT374 that is not capable of this
- * mode according to the manual)
- * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(),
- * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
- * unify HPT36x/37x timing setup code and the speedproc handlers by joining
- * the register setting lists into the table indexed by the clock selected
- * - set the correct hwif->ultra_mask for each individual chip
- * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
- * - stop resetting HPT370's state machine before each DMA transfer as that has
- * caused more harm than good
- * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/slab.h>
-
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-#define DRV_NAME "hpt366"
-
-/* various tuning parameters */
-#undef HPT_RESET_STATE_ENGINE
-#undef HPT_DELAY_INTERRUPT
-
-static const char *bad_ata100_5[] = {
- "IBM-DTLA-307075",
- "IBM-DTLA-307060",
- "IBM-DTLA-307045",
- "IBM-DTLA-307030",
- "IBM-DTLA-307020",
- "IBM-DTLA-307015",
- "IBM-DTLA-305040",
- "IBM-DTLA-305030",
- "IBM-DTLA-305020",
- "IC35L010AVER07-0",
- "IC35L020AVER07-0",
- "IC35L030AVER07-0",
- "IC35L040AVER07-0",
- "IC35L060AVER07-0",
- "WDC AC310200R",
- NULL
-};
-
-static const char *bad_ata66_4[] = {
- "IBM-DTLA-307075",
- "IBM-DTLA-307060",
- "IBM-DTLA-307045",
- "IBM-DTLA-307030",
- "IBM-DTLA-307020",
- "IBM-DTLA-307015",
- "IBM-DTLA-305040",
- "IBM-DTLA-305030",
- "IBM-DTLA-305020",
- "IC35L010AVER07-0",
- "IC35L020AVER07-0",
- "IC35L030AVER07-0",
- "IC35L040AVER07-0",
- "IC35L060AVER07-0",
- "WDC AC310200R",
- "MAXTOR STM3320620A",
- NULL
-};
-
-static const char *bad_ata66_3[] = {
- "WDC AC310200R",
- NULL
-};
-
-static const char *bad_ata33[] = {
- "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
- "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
- "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
- "Maxtor 90510D4",
- "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
- "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
- "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
- NULL
-};
-
-static u8 xfer_speeds[] = {
- XFER_UDMA_6,
- XFER_UDMA_5,
- XFER_UDMA_4,
- XFER_UDMA_3,
- XFER_UDMA_2,
- XFER_UDMA_1,
- XFER_UDMA_0,
-
- XFER_MW_DMA_2,
- XFER_MW_DMA_1,
- XFER_MW_DMA_0,
-
- XFER_PIO_4,
- XFER_PIO_3,
- XFER_PIO_2,
- XFER_PIO_1,
- XFER_PIO_0
-};
-
-/* Key for bus clock timings
- * 36x 37x
- * bits bits
- * 0:3 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
- * cycles = value + 1
- * 4:7 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
- * cycles = value + 1
- * 8:11 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
- * register access.
- * 12:15 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
- * register access.
- * 16:18 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
- * - 21 CLK frequency: 0=ATA clock, 1=dual ATA clock.
- * 19:21 22:24 pre_high_time. Time to initialize the 1st cycle for PIO and
- * MW DMA xfer.
- * 22:24 25:27 cmd_pre_high_time. Time to initialize the 1st PIO cycle for
- * task file register access.
- * 28 28 UDMA enable.
- * 29 29 DMA enable.
- * 30 30 PIO MST enable. If set, the chip is in bus master mode during
- * PIO xfer.
- * 31 31 FIFO enable.
- */
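The field key above is easier to check against the timing tables that follow with a decoder in hand. Below is a minimal editorial sketch (not part of the removed driver; the field and helper names are invented here) that unpacks a timing word using the 36x column of the key, applied to the XFER_PIO_4 entry of the 33 MHz table.

/* Editor's illustration only -- not part of the deleted hpt366.c. */
#include <stdint.h>
#include <stdio.h>

struct hpt36x_timing {
	unsigned data_high, data_low;		/* bits 0:3 and 4:7     */
	unsigned cmd_high, cmd_low;		/* bits 8:11 and 12:15  */
	unsigned udma_cycle;			/* bits 16:18           */
	unsigned pre_high, cmd_pre_high;	/* bits 19:21 and 22:24 */
	unsigned udma_en, dma_en, pio_mst_en, fifo_en;	/* bits 28..31 */
};

static struct hpt36x_timing hpt36x_decode(uint32_t w)
{
	struct hpt36x_timing t = {
		.data_high	= (w >>  0) & 0xf,
		.data_low	= (w >>  4) & 0xf,
		.cmd_high	= (w >>  8) & 0xf,
		.cmd_low	= (w >> 12) & 0xf,
		.udma_cycle	= (w >> 16) & 0x7,
		.pre_high	= (w >> 19) & 0x7,
		.cmd_pre_high	= (w >> 22) & 0x7,
		.udma_en	= (w >> 28) & 1,
		.dma_en		= (w >> 29) & 1,
		.pio_mst_en	= (w >> 30) & 1,
		.fifo_en	= (w >> 31) & 1,
	};
	return t;
}

int main(void)
{
	/* XFER_PIO_4 entry of thirty_three_base_hpt36x[] below */
	struct hpt36x_timing t = hpt36x_decode(0xc0c8a731);

	printf("data act/inact %u/%u clk, cmd act/inact %u/%u clk, fifo=%u dma=%u udma=%u\n",
	       t.data_low + 1, t.data_high + 1, t.cmd_low + 1, t.cmd_high + 1,
	       t.fifo_en, t.dma_en, t.udma_en);
	return 0;
}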
-
-static u32 forty_base_hpt36x[] = {
- /* XFER_UDMA_6 */ 0x900fd943,
- /* XFER_UDMA_5 */ 0x900fd943,
- /* XFER_UDMA_4 */ 0x900fd943,
- /* XFER_UDMA_3 */ 0x900ad943,
- /* XFER_UDMA_2 */ 0x900bd943,
- /* XFER_UDMA_1 */ 0x9008d943,
- /* XFER_UDMA_0 */ 0x9008d943,
-
- /* XFER_MW_DMA_2 */ 0xa008d943,
- /* XFER_MW_DMA_1 */ 0xa010d955,
- /* XFER_MW_DMA_0 */ 0xa010d9fc,
-
- /* XFER_PIO_4 */ 0xc008d963,
- /* XFER_PIO_3 */ 0xc010d974,
- /* XFER_PIO_2 */ 0xc010d997,
- /* XFER_PIO_1 */ 0xc010d9c7,
- /* XFER_PIO_0 */ 0xc018d9d9
-};
-
-static u32 thirty_three_base_hpt36x[] = {
- /* XFER_UDMA_6 */ 0x90c9a731,
- /* XFER_UDMA_5 */ 0x90c9a731,
- /* XFER_UDMA_4 */ 0x90c9a731,
- /* XFER_UDMA_3 */ 0x90cfa731,
- /* XFER_UDMA_2 */ 0x90caa731,
- /* XFER_UDMA_1 */ 0x90cba731,
- /* XFER_UDMA_0 */ 0x90c8a731,
-
- /* XFER_MW_DMA_2 */ 0xa0c8a731,
- /* XFER_MW_DMA_1 */ 0xa0c8a732, /* 0xa0c8a733 */
- /* XFER_MW_DMA_0 */ 0xa0c8a797,
-
- /* XFER_PIO_4 */ 0xc0c8a731,
- /* XFER_PIO_3 */ 0xc0c8a742,
- /* XFER_PIO_2 */ 0xc0d0a753,
- /* XFER_PIO_1 */ 0xc0d0a7a3, /* 0xc0d0a793 */
- /* XFER_PIO_0 */ 0xc0d0a7aa /* 0xc0d0a7a7 */
-};
-
-static u32 twenty_five_base_hpt36x[] = {
- /* XFER_UDMA_6 */ 0x90c98521,
- /* XFER_UDMA_5 */ 0x90c98521,
- /* XFER_UDMA_4 */ 0x90c98521,
- /* XFER_UDMA_3 */ 0x90cf8521,
- /* XFER_UDMA_2 */ 0x90cf8521,
- /* XFER_UDMA_1 */ 0x90cb8521,
- /* XFER_UDMA_0 */ 0x90cb8521,
-
- /* XFER_MW_DMA_2 */ 0xa0ca8521,
- /* XFER_MW_DMA_1 */ 0xa0ca8532,
- /* XFER_MW_DMA_0 */ 0xa0ca8575,
-
- /* XFER_PIO_4 */ 0xc0ca8521,
- /* XFER_PIO_3 */ 0xc0ca8532,
- /* XFER_PIO_2 */ 0xc0ca8542,
- /* XFER_PIO_1 */ 0xc0d08572,
- /* XFER_PIO_0 */ 0xc0d08585
-};
-
-/*
- * The following are the new timing tables with PIO mode data/taskfile transfer
- * overclocking fixed...
- */
-
-/* This table is taken from the HPT370 data manual rev. 1.02 */
-static u32 thirty_three_base_hpt37x[] = {
- /* XFER_UDMA_6 */ 0x16455031, /* 0x16655031 ?? */
- /* XFER_UDMA_5 */ 0x16455031,
- /* XFER_UDMA_4 */ 0x16455031,
- /* XFER_UDMA_3 */ 0x166d5031,
- /* XFER_UDMA_2 */ 0x16495031,
- /* XFER_UDMA_1 */ 0x164d5033,
- /* XFER_UDMA_0 */ 0x16515097,
-
- /* XFER_MW_DMA_2 */ 0x26515031,
- /* XFER_MW_DMA_1 */ 0x26515033,
- /* XFER_MW_DMA_0 */ 0x26515097,
-
- /* XFER_PIO_4 */ 0x06515021,
- /* XFER_PIO_3 */ 0x06515022,
- /* XFER_PIO_2 */ 0x06515033,
- /* XFER_PIO_1 */ 0x06915065,
- /* XFER_PIO_0 */ 0x06d1508a
-};
-
-static u32 fifty_base_hpt37x[] = {
- /* XFER_UDMA_6 */ 0x1a861842,
- /* XFER_UDMA_5 */ 0x1a861842,
- /* XFER_UDMA_4 */ 0x1aae1842,
- /* XFER_UDMA_3 */ 0x1a8e1842,
- /* XFER_UDMA_2 */ 0x1a0e1842,
- /* XFER_UDMA_1 */ 0x1a161854,
- /* XFER_UDMA_0 */ 0x1a1a18ea,
-
- /* XFER_MW_DMA_2 */ 0x2a821842,
- /* XFER_MW_DMA_1 */ 0x2a821854,
- /* XFER_MW_DMA_0 */ 0x2a8218ea,
-
- /* XFER_PIO_4 */ 0x0a821842,
- /* XFER_PIO_3 */ 0x0a821843,
- /* XFER_PIO_2 */ 0x0a821855,
- /* XFER_PIO_1 */ 0x0ac218a8,
- /* XFER_PIO_0 */ 0x0b02190c
-};
-
-static u32 sixty_six_base_hpt37x[] = {
- /* XFER_UDMA_6 */ 0x1c86fe62,
- /* XFER_UDMA_5 */ 0x1caefe62, /* 0x1c8afe62 */
- /* XFER_UDMA_4 */ 0x1c8afe62,
- /* XFER_UDMA_3 */ 0x1c8efe62,
- /* XFER_UDMA_2 */ 0x1c92fe62,
- /* XFER_UDMA_1 */ 0x1c9afe62,
- /* XFER_UDMA_0 */ 0x1c82fe62,
-
- /* XFER_MW_DMA_2 */ 0x2c82fe62,
- /* XFER_MW_DMA_1 */ 0x2c82fe66,
- /* XFER_MW_DMA_0 */ 0x2c82ff2e,
-
- /* XFER_PIO_4 */ 0x0c82fe62,
- /* XFER_PIO_3 */ 0x0c82fe84,
- /* XFER_PIO_2 */ 0x0c82fea6,
- /* XFER_PIO_1 */ 0x0d02ff26,
- /* XFER_PIO_0 */ 0x0d42ff7f
-};
-
-#define HPT371_ALLOW_ATA133_6 1
-#define HPT302_ALLOW_ATA133_6 1
-#define HPT372_ALLOW_ATA133_6 1
-#define HPT370_ALLOW_ATA100_5 0
-#define HPT366_ALLOW_ATA66_4 1
-#define HPT366_ALLOW_ATA66_3 1
-
-/* Supported ATA clock frequencies */
-enum ata_clock {
- ATA_CLOCK_25MHZ,
- ATA_CLOCK_33MHZ,
- ATA_CLOCK_40MHZ,
- ATA_CLOCK_50MHZ,
- ATA_CLOCK_66MHZ,
- NUM_ATA_CLOCKS
-};
-
-struct hpt_timings {
- u32 pio_mask;
- u32 dma_mask;
- u32 ultra_mask;
- u32 *clock_table[NUM_ATA_CLOCKS];
-};
-
-/*
- * Hold all the HighPoint chip information in one place.
- */
-
-struct hpt_info {
- char *chip_name; /* Chip name */
- u8 chip_type; /* Chip type */
- u8 udma_mask; /* Allowed UltraDMA modes mask. */
- u8 dpll_clk; /* DPLL clock in MHz */
- u8 pci_clk; /* PCI clock in MHz */
- struct hpt_timings *timings; /* Chipset timing data */
- u8 clock; /* ATA clock selected */
-};
-
-/* Supported HighPoint chips */
-enum {
- HPT36x,
- HPT370,
- HPT370A,
- HPT374,
- HPT372,
- HPT372A,
- HPT302,
- HPT371,
- HPT372N,
- HPT302N,
- HPT371N
-};
-
-static struct hpt_timings hpt36x_timings = {
- .pio_mask = 0xc1f8ffff,
- .dma_mask = 0x303800ff,
- .ultra_mask = 0x30070000,
- .clock_table = {
- [ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x,
- [ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x,
- [ATA_CLOCK_40MHZ] = forty_base_hpt36x,
- [ATA_CLOCK_50MHZ] = NULL,
- [ATA_CLOCK_66MHZ] = NULL
- }
-};
-
-static struct hpt_timings hpt37x_timings = {
- .pio_mask = 0xcfc3ffff,
- .dma_mask = 0x31c001ff,
- .ultra_mask = 0x303c0000,
- .clock_table = {
- [ATA_CLOCK_25MHZ] = NULL,
- [ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x,
- [ATA_CLOCK_40MHZ] = NULL,
- [ATA_CLOCK_50MHZ] = fifty_base_hpt37x,
- [ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x
- }
-};
-
-static const struct hpt_info hpt36x = {
- .chip_name = "HPT36x",
- .chip_type = HPT36x,
- .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
- .dpll_clk = 0, /* no DPLL */
- .timings = &hpt36x_timings
-};
-
-static const struct hpt_info hpt370 = {
- .chip_name = "HPT370",
- .chip_type = HPT370,
- .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
- .dpll_clk = 48,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt370a = {
- .chip_name = "HPT370A",
- .chip_type = HPT370A,
- .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
- .dpll_clk = 48,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt374 = {
- .chip_name = "HPT374",
- .chip_type = HPT374,
- .udma_mask = ATA_UDMA5,
- .dpll_clk = 48,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt372 = {
- .chip_name = "HPT372",
- .chip_type = HPT372,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 55,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt372a = {
- .chip_name = "HPT372A",
- .chip_type = HPT372A,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 66,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt302 = {
- .chip_name = "HPT302",
- .chip_type = HPT302,
- .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 66,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt371 = {
- .chip_name = "HPT371",
- .chip_type = HPT371,
- .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 66,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt372n = {
- .chip_name = "HPT372N",
- .chip_type = HPT372N,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 77,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt302n = {
- .chip_name = "HPT302N",
- .chip_type = HPT302N,
- .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 77,
- .timings = &hpt37x_timings
-};
-
-static const struct hpt_info hpt371n = {
- .chip_name = "HPT371N",
- .chip_type = HPT371N,
- .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
- .dpll_clk = 77,
- .timings = &hpt37x_timings
-};
-
-static bool check_in_drive_list(ide_drive_t *drive, const char **list)
-{
- return match_string(list, -1, (char *)&drive->id[ATA_ID_PROD]) >= 0;
-}
-
-static struct hpt_info *hpt3xx_get_info(struct device *dev)
-{
- struct ide_host *host = dev_get_drvdata(dev);
- struct hpt_info *info = (struct hpt_info *)host->host_priv;
-
- return dev == host->dev[1] ? info + 1 : info;
-}
-
-/*
- * The Marvell bridge chips used on the HighPoint SATA cards do not seem
- * to support UltraDMA modes 1, 2, and 3, nor any MWDMA modes...
- */
-
-static u8 hpt3xx_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- u8 mask = hwif->ultra_mask;
-
- switch (info->chip_type) {
- case HPT36x:
- if (!HPT366_ALLOW_ATA66_4 ||
- check_in_drive_list(drive, bad_ata66_4))
- mask = ATA_UDMA3;
-
- if (!HPT366_ALLOW_ATA66_3 ||
- check_in_drive_list(drive, bad_ata66_3))
- mask = ATA_UDMA2;
- break;
- case HPT370:
- if (!HPT370_ALLOW_ATA100_5 ||
- check_in_drive_list(drive, bad_ata100_5))
- mask = ATA_UDMA4;
- break;
- case HPT370A:
- if (!HPT370_ALLOW_ATA100_5 ||
- check_in_drive_list(drive, bad_ata100_5))
- return ATA_UDMA4;
- fallthrough;
- case HPT372 :
- case HPT372A:
- case HPT372N:
- case HPT374 :
- if (ata_id_is_sata(drive->id))
- mask &= ~0x0e;
- fallthrough;
- default:
- return mask;
- }
-
- return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask;
-}
-
-static u8 hpt3xx_mdma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
-
- switch (info->chip_type) {
- case HPT372 :
- case HPT372A:
- case HPT372N:
- case HPT374 :
- if (ata_id_is_sata(drive->id))
- return 0x00;
- fallthrough;
- default:
- return 0x07;
- }
-}
-
-static u32 get_speed_setting(u8 speed, struct hpt_info *info)
-{
- int i;
-
- /*
- * Lookup the transfer mode table to get the index into
- * the timing table.
- *
- * NOTE: For XFER_PIO_SLOW, PIO mode 0 timings will be used.
- */
- for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
- if (xfer_speeds[i] == speed)
- break;
-
- return info->timings->clock_table[info->clock][i];
-}
-
-static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- struct hpt_timings *t = info->timings;
- u8 itr_addr = 0x40 + (drive->dn * 4);
- u32 old_itr = 0;
- const u8 speed = drive->dma_mode;
- u32 new_itr = get_speed_setting(speed, info);
- u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask :
- (speed < XFER_UDMA_0 ? t->dma_mask :
- t->ultra_mask);
-
- pci_read_config_dword(dev, itr_addr, &old_itr);
- new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask);
- /*
- * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
- * to avoid problems handling I/O errors later
- */
- new_itr &= ~0xc0000000;
-
- pci_write_config_dword(dev, itr_addr, new_itr);
-}
-
-static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- hpt3xx_set_mode(hwif, drive);
-}
-
-static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
-
- if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
- return;
-
- if (info->chip_type >= HPT370) {
- u8 scr1 = 0;
-
- pci_read_config_byte(dev, 0x5a, &scr1);
- if (((scr1 & 0x10) >> 4) != mask) {
- if (mask)
- scr1 |= 0x10;
- else
- scr1 &= ~0x10;
- pci_write_config_byte(dev, 0x5a, scr1);
- }
- } else if (mask)
- disable_irq(hwif->irq);
- else
- enable_irq(hwif->irq);
-}
-
-/*
- * This is specific to the HPT366 UDMA chipset
- * by HighPoint|Triones Technologies, Inc.
- */
-static void hpt366_dma_lost_irq(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 mcr1 = 0, mcr3 = 0, scr1 = 0;
-
- pci_read_config_byte(dev, 0x50, &mcr1);
- pci_read_config_byte(dev, 0x52, &mcr3);
- pci_read_config_byte(dev, 0x5a, &scr1);
- printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
- drive->name, __func__, mcr1, mcr3, scr1);
- if (scr1 & 0x10)
- pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
- ide_dma_lost_irq(drive);
-}
-
-static void hpt370_clear_engine(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- pci_write_config_byte(dev, hwif->select_data, 0x37);
- udelay(10);
-}
-
-static void hpt370_irq_timeout(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 bfifo = 0;
- u8 dma_cmd;
-
- pci_read_config_word(dev, hwif->select_data + 2, &bfifo);
- printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
-
- /* get DMA command mode */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- /* stop DMA */
- outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
- hpt370_clear_engine(drive);
-}
-
-static void hpt370_dma_start(ide_drive_t *drive)
-{
-#ifdef HPT_RESET_STATE_ENGINE
- hpt370_clear_engine(drive);
-#endif
- ide_dma_start(drive);
-}
-
-static int hpt370_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
-
- if (dma_stat & ATA_DMA_ACTIVE) {
- /* wait a little */
- udelay(20);
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- if (dma_stat & ATA_DMA_ACTIVE)
- hpt370_irq_timeout(drive);
- }
- return ide_dma_end(drive);
-}
-
-/* returns 1 if DMA IRQ issued, 0 otherwise */
-static int hpt374_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 bfifo = 0;
- u8 dma_stat;
-
- pci_read_config_word(dev, hwif->select_data + 2, &bfifo);
- if (bfifo & 0x1FF) {
-// printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
- return 0;
- }
-
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- /* return 1 if INTR asserted */
- if (dma_stat & ATA_DMA_INTR)
- return 1;
-
- return 0;
-}
-
-static int hpt374_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 mcr = 0, mcr_addr = hwif->select_data;
- u8 bwsr = 0, mask = hwif->channel ? 0x02 : 0x01;
-
- pci_read_config_byte(dev, 0x6a, &bwsr);
- pci_read_config_byte(dev, mcr_addr, &mcr);
- if (bwsr & mask)
- pci_write_config_byte(dev, mcr_addr, mcr | 0x30);
- return ide_dma_end(drive);
-}
-
-/**
- * hpt3xxn_set_clock - perform clock switching dance
- * @hwif: hwif to switch
- * @mode: clocking mode (0x21 for write, 0x23 otherwise)
- *
- * Switch the DPLL clock on the HPT3xxN devices. This is a right mess.
- */
-
-static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
-{
- unsigned long base = hwif->extra_base;
- u8 scr2 = inb(base + 0x6b);
-
- if ((scr2 & 0x7f) == mode)
- return;
-
- /* Tristate the bus */
- outb(0x80, base + 0x63);
- outb(0x80, base + 0x67);
-
- /* Switch clock and reset channels */
- outb(mode, base + 0x6b);
- outb(0xc0, base + 0x69);
-
- /*
- * Reset the state machines.
- * NOTE: avoid accidentally enabling the disabled channels.
- */
- outb(inb(base + 0x60) | 0x32, base + 0x60);
- outb(inb(base + 0x64) | 0x32, base + 0x64);
-
- /* Complete reset */
- outb(0x00, base + 0x69);
-
- /* Reconnect channels to bus */
- outb(0x00, base + 0x63);
- outb(0x00, base + 0x67);
-}
-
-/**
- * hpt3xxn_rw_disk - prepare for I/O
- * @drive: drive for command
- * @rq: block request structure
- *
- * This is called when a disk I/O is issued to HPT3xxN.
- * We need it because of the clock switching.
- */
-
-static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
-{
- hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
-}
-
-/**
- * hpt37x_calibrate_dpll - calibrate the DPLL
- * @dev: PCI device
- *
- * Perform a calibration cycle on the DPLL.
- * Returns 1 if this succeeds
- */
-static int hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high)
-{
- u32 dpll = (f_high << 16) | f_low | 0x100;
- u8 scr2;
- int i;
-
- pci_write_config_dword(dev, 0x5c, dpll);
-
- /* Wait for oscillator ready */
- for(i = 0; i < 0x5000; ++i) {
- udelay(50);
- pci_read_config_byte(dev, 0x5b, &scr2);
- if (scr2 & 0x80)
- break;
- }
- /* See if it stays ready (we'll just bail out if it's not yet) */
- for(i = 0; i < 0x1000; ++i) {
- pci_read_config_byte(dev, 0x5b, &scr2);
- /* DPLL destabilized? */
- if(!(scr2 & 0x80))
- return 0;
- }
- /* Turn off tuning, we have the DPLL set */
- pci_read_config_dword (dev, 0x5c, &dpll);
- pci_write_config_dword(dev, 0x5c, (dpll & ~0x100));
- return 1;
-}
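As a worked example of what this routine is handed (an editorial sketch, not part of the removed driver): init_chipset_hpt366() below derives f_low = pci_clk * 48 / dpll_clk, so a 33 MHz PCI bus with the 66 MHz DPLL target gives f_low = 24 and delta = 2, and the word written to register 0x5c becomes (26 << 16) | 24 | 0x100 = 0x001a0118. The example values are assumptions for illustration.

/* Editor's illustration only -- mirrors the arithmetic in init_chipset_hpt366(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned pci_clk = 33, dpll_clk = 66;		/* MHz; example values only */
	uint16_t f_low = (pci_clk * 48) / dpll_clk;	/* 24 */
	uint16_t delta = pci_clk < 50 ? 2 : 4;		/* 2  */
	uint32_t dpll = ((uint32_t)(f_low + delta) << 16) | f_low | 0x100;

	printf("f_low=%u, DPLL word=%#010x\n", f_low, dpll);	/* 0x001a0118 */
	return 0;
}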
-
-static void hpt3xx_disable_fast_irq(struct pci_dev *dev, u8 mcr_addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct hpt_info *info = host->host_priv + (&dev->dev == host->dev[1]);
- u8 chip_type = info->chip_type;
- u8 new_mcr, old_mcr = 0;
-
- /*
- * Disable the "fast interrupt" prediction. Don't hold off
- * on interrupts. (== 0x01 despite what the docs say)
- */
- pci_read_config_byte(dev, mcr_addr + 1, &old_mcr);
-
- if (chip_type >= HPT374)
- new_mcr = old_mcr & ~0x07;
- else if (chip_type >= HPT370) {
- new_mcr = old_mcr;
- new_mcr &= ~0x02;
-#ifdef HPT_DELAY_INTERRUPT
- new_mcr &= ~0x01;
-#else
- new_mcr |= 0x01;
-#endif
- } else /* HPT366 and HPT368 */
- new_mcr = old_mcr & ~0x80;
-
- if (new_mcr != old_mcr)
- pci_write_config_byte(dev, mcr_addr + 1, new_mcr);
-}
-
-static int init_chipset_hpt366(struct pci_dev *dev)
-{
- unsigned long io_base = pci_resource_start(dev, 4);
- struct hpt_info *info = hpt3xx_get_info(&dev->dev);
- const char *name = DRV_NAME;
- u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */
- u8 chip_type;
- enum ata_clock clock;
-
- chip_type = info->chip_type;
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
- pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
- pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
-
- /*
- * First, try to estimate the PCI clock frequency...
- */
- if (chip_type >= HPT370) {
- u8 scr1 = 0;
- u16 f_cnt = 0;
- u32 temp = 0;
-
- /* Interrupt force enable. */
- pci_read_config_byte(dev, 0x5a, &scr1);
- if (scr1 & 0x10)
- pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
-
- /*
- * HighPoint does this for HPT372A.
- * NOTE: This register is only writeable via I/O space.
- */
- if (chip_type == HPT372A)
- outb(0x0e, io_base + 0x9c);
-
- /*
- * Default to PCI clock. Make sure MA15/16 are set to output
- * to prevent drives having problems with 40-pin cables.
- */
- pci_write_config_byte(dev, 0x5b, 0x23);
-
- /*
- * We'll have to read f_CNT value in order to determine
- * the PCI clock frequency according to the following ratio:
- *
- * f_CNT = Fpci * 192 / Fdpll
- *
- * First try reading the register in which the HighPoint BIOS
- * saves f_CNT value before reprogramming the DPLL from its
- * default setting (which differs for the various chips).
- *
- * NOTE: This register is only accessible via I/O space;
- * HPT374 BIOS only saves it for the function 0, so we have to
- * always read it from there -- no need to check the result of
- * pci_get_slot() for the function 0 as the whole device has
- * been already "pinned" (via function 1) in init_setup_hpt374()
- */
- if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) {
- struct pci_dev *dev1 = pci_get_slot(dev->bus,
- dev->devfn - 1);
- unsigned long io_base = pci_resource_start(dev1, 4);
-
- temp = inl(io_base + 0x90);
- pci_dev_put(dev1);
- } else
- temp = inl(io_base + 0x90);
-
- /*
- * In case the signature check fails, we'll have to
- * resort to reading the f_CNT register itself in hopes
- * that nobody has touched the DPLL yet...
- */
- if ((temp & 0xFFFFF000) != 0xABCDE000) {
- int i;
-
- printk(KERN_WARNING "%s %s: no clock data saved by "
- "BIOS\n", name, pci_name(dev));
-
- /* Calculate the average value of f_CNT. */
- for (temp = i = 0; i < 128; i++) {
- pci_read_config_word(dev, 0x78, &f_cnt);
- temp += f_cnt & 0x1ff;
- mdelay(1);
- }
- f_cnt = temp / 128;
- } else
- f_cnt = temp & 0x1ff;
-
- dpll_clk = info->dpll_clk;
- pci_clk = (f_cnt * dpll_clk) / 192;
-
- /* Clamp PCI clock to bands. */
- if (pci_clk < 40)
- pci_clk = 33;
- else if(pci_clk < 45)
- pci_clk = 40;
- else if(pci_clk < 55)
- pci_clk = 50;
- else
- pci_clk = 66;
-
- printk(KERN_INFO "%s %s: DPLL base: %d MHz, f_CNT: %d, "
- "assuming %d MHz PCI\n", name, pci_name(dev),
- dpll_clk, f_cnt, pci_clk);
- } else {
- u32 itr1 = 0;
-
- pci_read_config_dword(dev, 0x40, &itr1);
-
- /* Detect PCI clock by looking at cmd_high_time. */
- switch ((itr1 >> 8) & 0x0f) {
- case 0x09:
- pci_clk = 40;
- break;
- case 0x05:
- pci_clk = 25;
- break;
- case 0x07:
- default:
- pci_clk = 33;
- break;
- }
- }
-
- /* Let's assume we'll use PCI clock for the ATA clock... */
- switch (pci_clk) {
- case 25:
- clock = ATA_CLOCK_25MHZ;
- break;
- case 33:
- default:
- clock = ATA_CLOCK_33MHZ;
- break;
- case 40:
- clock = ATA_CLOCK_40MHZ;
- break;
- case 50:
- clock = ATA_CLOCK_50MHZ;
- break;
- case 66:
- clock = ATA_CLOCK_66MHZ;
- break;
- }
-
- /*
- * Only try the DPLL if we don't have a table for the PCI clock that
- * we are running at for HPT370/A, always use it for anything newer...
- *
- * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI.
- * We also don't like using the DPLL because this causes glitches
- * on PRST-/SRST- when the state engine gets reset...
- */
- if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) {
- u16 f_low, delta = pci_clk < 50 ? 2 : 4;
- int adjust;
-
- /*
- * Select 66 MHz DPLL clock only if UltraATA/133 mode is
- * supported/enabled, use 50 MHz DPLL clock otherwise...
- */
- if (info->udma_mask == ATA_UDMA6) {
- dpll_clk = 66;
- clock = ATA_CLOCK_66MHZ;
- } else if (dpll_clk) { /* HPT36x chips don't have DPLL */
- dpll_clk = 50;
- clock = ATA_CLOCK_50MHZ;
- }
-
- if (info->timings->clock_table[clock] == NULL) {
- printk(KERN_ERR "%s %s: unknown bus timing!\n",
- name, pci_name(dev));
- return -EIO;
- }
-
- /* Select the DPLL clock. */
- pci_write_config_byte(dev, 0x5b, 0x21);
-
- /*
- * Adjust the DPLL based upon PCI clock, enable it,
- * and wait for stabilization...
- */
- f_low = (pci_clk * 48) / dpll_clk;
-
- for (adjust = 0; adjust < 8; adjust++) {
- if(hpt37x_calibrate_dpll(dev, f_low, f_low + delta))
- break;
-
- /*
- * See if it'll settle at a fractionally different clock
- */
- if (adjust & 1)
- f_low -= adjust >> 1;
- else
- f_low += adjust >> 1;
- }
- if (adjust == 8) {
- printk(KERN_ERR "%s %s: DPLL did not stabilize!\n",
- name, pci_name(dev));
- return -EIO;
- }
-
- printk(KERN_INFO "%s %s: using %d MHz DPLL clock\n",
- name, pci_name(dev), dpll_clk);
- } else {
- /* Mark the fact that we're not using the DPLL. */
- dpll_clk = 0;
-
- printk(KERN_INFO "%s %s: using %d MHz PCI clock\n",
- name, pci_name(dev), pci_clk);
- }
-
- /* Store the clock frequencies. */
- info->dpll_clk = dpll_clk;
- info->pci_clk = pci_clk;
- info->clock = clock;
-
- if (chip_type >= HPT370) {
- u8 mcr1, mcr4;
-
- /*
- * Reset the state engines.
- * NOTE: Avoid accidentally enabling the disabled channels.
- */
- pci_read_config_byte (dev, 0x50, &mcr1);
- pci_read_config_byte (dev, 0x54, &mcr4);
- pci_write_config_byte(dev, 0x50, (mcr1 | 0x32));
- pci_write_config_byte(dev, 0x54, (mcr4 | 0x32));
- udelay(100);
- }
-
- /*
- * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
- * the MISC. register to stretch the UltraDMA Tss timing.
- * NOTE: This register is only writeable via I/O space.
- */
- if (chip_type == HPT371N && clock == ATA_CLOCK_66MHZ)
- outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c);
-
- hpt3xx_disable_fast_irq(dev, 0x50);
- hpt3xx_disable_fast_irq(dev, 0x54);
-
- return 0;
-}
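To make the f_CNT handling above concrete, here is a small editorial sketch (not part of the removed driver) of the PCI-clock recovery performed by init_chipset_hpt366(), including the clamping to the standard 33/40/50/66 MHz bands; the helper name and the example figures are assumptions for illustration.

/* Editor's illustration only -- mirrors the f_CNT handling in init_chipset_hpt366(). */
#include <stdio.h>

static unsigned pci_clk_from_f_cnt(unsigned f_cnt, unsigned dpll_clk)
{
	unsigned pci_clk = (f_cnt * dpll_clk) / 192;	/* inverts f_CNT = Fpci * 192 / Fdpll */

	/* Same band clamping as above. */
	if (pci_clk < 40)
		return 33;
	if (pci_clk < 45)
		return 40;
	if (pci_clk < 55)
		return 50;
	return 66;
}

int main(void)
{
	/* An HPT370 (48 MHz base DPLL) on a 33 MHz bus saves roughly 33 * 192 / 48 = 132. */
	printf("%u MHz PCI\n", pci_clk_from_f_cnt(132, 48));	/* prints "33 MHz PCI" */
	return 0;
}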
-
-static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- u8 chip_type = info->chip_type;
- u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02;
-
- /*
- * The HPT37x uses the CBLID pins as outputs for MA15/MA16
- * address lines to access an external EEPROM. To read valid
- * cable detect state the pins must be enabled as inputs.
- */
- if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) {
- /*
- * HPT374 PCI function 1
- * - set bit 15 of reg 0x52 to enable TCBLID as input
- * - set bit 15 of reg 0x56 to enable FCBLID as input
- */
- u8 mcr_addr = hwif->select_data + 2;
- u16 mcr;
-
- pci_read_config_word(dev, mcr_addr, &mcr);
- pci_write_config_word(dev, mcr_addr, mcr | 0x8000);
- /* Debounce, then read cable ID register */
- udelay(10);
- pci_read_config_byte(dev, 0x5a, &scr1);
- pci_write_config_word(dev, mcr_addr, mcr);
- } else if (chip_type >= HPT370) {
- /*
- * HPT370/372 and 374 pcifn 0
- * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs
- */
- u8 scr2 = 0;
-
- pci_read_config_byte(dev, 0x5b, &scr2);
- pci_write_config_byte(dev, 0x5b, scr2 & ~1);
- /* Debounce, then read cable ID register */
- udelay(10);
- pci_read_config_byte(dev, 0x5a, &scr1);
- pci_write_config_byte(dev, 0x5b, scr2);
- } else
- pci_read_config_byte(dev, 0x5a, &scr1);
-
- return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static void init_hwif_hpt366(ide_hwif_t *hwif)
-{
- struct hpt_info *info = hpt3xx_get_info(hwif->dev);
- u8 chip_type = info->chip_type;
-
- /* Cache the channel's MISC. control registers' offset */
- hwif->select_data = hwif->channel ? 0x54 : 0x50;
-
- /*
- * HPT3xxN chips have some complications:
- *
- * - on 33 MHz PCI we must clock switch
- * - on 66 MHz PCI we must NOT use the PCI clock
- */
- if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) {
- /*
- * Clock is shared between the channels,
- * so we'll have to serialize them... :-(
- */
- hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
- hwif->rw_disk = &hpt3xxn_rw_disk;
- }
-}
-
-static int init_dma_hpt366(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags, base = ide_pci_dma_base(hwif, d);
- u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
-
- if (base == 0)
- return -1;
-
- hwif->dma_base = base;
-
- if (ide_pci_check_simplex(hwif, d) < 0)
- return -1;
-
- if (ide_pci_set_master(dev, d->name) < 0)
- return -1;
-
- dma_old = inb(base + 2);
-
- local_irq_save(flags);
-
- dma_new = dma_old;
- pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
- pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma);
-
- if (masterdma & 0x30) dma_new |= 0x20;
- if ( slavedma & 0x30) dma_new |= 0x40;
- if (dma_new != dma_old)
- outb(dma_new, base + 2);
-
- local_irq_restore(flags);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-
- hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
-
- return 0;
-}
-
-static void hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
-{
- if (dev2->irq != dev->irq) {
- /* FIXME: we need a core pci_set_interrupt() */
- dev2->irq = dev->irq;
- printk(KERN_INFO DRV_NAME " %s: PCI config space interrupt "
- "fixed\n", pci_name(dev2));
- }
-}
-
-static void hpt371_init(struct pci_dev *dev)
-{
- u8 mcr1 = 0;
-
- /*
- * HPT371 chips physically have only one channel, the secondary one,
- * but the primary channel registers do exist! Go figure...
- * So, we manually disable the non-existing channel here
- * (if the BIOS hasn't done this already).
- */
- pci_read_config_byte(dev, 0x50, &mcr1);
- if (mcr1 & 0x04)
- pci_write_config_byte(dev, 0x50, mcr1 & ~0x04);
-}
-
-static int hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
-{
- u8 mcr1 = 0, pin1 = 0, pin2 = 0;
-
- /*
- * Now we'll have to force both channels enabled if
- * at least one of them has been enabled by BIOS...
- */
- pci_read_config_byte(dev, 0x50, &mcr1);
- if (mcr1 & 0x30)
- pci_write_config_byte(dev, 0x50, mcr1 | 0x30);
-
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1);
- pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2);
-
- if (pin1 != pin2 && dev->irq == dev2->irq) {
- printk(KERN_INFO DRV_NAME " %s: onboard version of chipset, "
- "pin1=%d pin2=%d\n", pci_name(dev), pin1, pin2);
- return 1;
- }
-
- return 0;
-}
-
-#define IDE_HFLAGS_HPT3XX \
- (IDE_HFLAG_NO_ATAPI_DMA | \
- IDE_HFLAG_OFF_BOARD)
-
-static const struct ide_port_ops hpt3xx_port_ops = {
- .set_pio_mode = hpt3xx_set_pio_mode,
- .set_dma_mode = hpt3xx_set_mode,
- .maskproc = hpt3xx_maskproc,
- .mdma_filter = hpt3xx_mdma_filter,
- .udma_filter = hpt3xx_udma_filter,
- .cable_detect = hpt3xx_cable_detect,
-};
-
-static const struct ide_dma_ops hpt37x_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = hpt374_dma_end,
- .dma_test_irq = hpt374_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_dma_ops hpt370_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = hpt370_dma_start,
- .dma_end = hpt370_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = hpt370_irq_timeout,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_dma_ops hpt36x_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = hpt366_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info hpt366_chipsets[] = {
- { /* 0: HPT36x */
- .name = DRV_NAME,
- .init_chipset = init_chipset_hpt366,
- .init_hwif = init_hwif_hpt366,
- .init_dma = init_dma_hpt366,
- /*
- * HPT36x chips have one channel per function and have
- * both channel enable bits located differently and visible
- * to both functions -- really stupid design decision... :-(
- * Bit 4 is for the primary channel, bit 5 for the secondary.
- */
- .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
- .port_ops = &hpt3xx_port_ops,
- .dma_ops = &hpt36x_dma_ops,
- .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- },
- { /* 1: HPT3xx */
- .name = DRV_NAME,
- .init_chipset = init_chipset_hpt366,
- .init_hwif = init_hwif_hpt366,
- .init_dma = init_dma_hpt366,
- .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- .port_ops = &hpt3xx_port_ops,
- .dma_ops = &hpt37x_dma_ops,
- .host_flags = IDE_HFLAGS_HPT3XX,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- }
-};
-
-/**
- * hpt366_init_one - called when an HPT366 is found
- * @dev: the hpt366 device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-static int hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct hpt_info *info = NULL;
- struct hpt_info *dyn_info;
- struct pci_dev *dev2 = NULL;
- struct ide_port_info d;
- u8 idx = id->driver_data;
- u8 rev = dev->revision;
- int ret;
-
- if ((idx == 0 || idx == 4) && (PCI_FUNC(dev->devfn) & 1))
- return -ENODEV;
-
- switch (idx) {
- case 0:
- if (rev < 3)
- info = &hpt36x;
- else {
- switch (min_t(u8, rev, 6)) {
- case 3: info = &hpt370; break;
- case 4: info = &hpt370a; break;
- case 5: info = &hpt372; break;
- case 6: info = &hpt372n; break;
- }
- idx++;
- }
- break;
- case 1:
- info = (rev > 1) ? &hpt372n : &hpt372a;
- break;
- case 2:
- info = (rev > 1) ? &hpt302n : &hpt302;
- break;
- case 3:
- hpt371_init(dev);
- info = (rev > 1) ? &hpt371n : &hpt371;
- break;
- case 4:
- info = &hpt374;
- break;
- case 5:
- info = &hpt372n;
- break;
- }
-
- printk(KERN_INFO DRV_NAME ": %s chipset detected\n", info->chip_name);
-
- d = hpt366_chipsets[min_t(u8, idx, 1)];
-
- d.udma_mask = info->udma_mask;
-
- /* fixup ->dma_ops for HPT370/HPT370A */
- if (info == &hpt370 || info == &hpt370a)
- d.dma_ops = &hpt370_dma_ops;
-
- if (info == &hpt36x || info == &hpt374)
- dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
-
- dyn_info = kcalloc(dev2 ? 2 : 1, sizeof(*dyn_info), GFP_KERNEL);
- if (dyn_info == NULL) {
- printk(KERN_ERR "%s %s: out of memory!\n",
- d.name, pci_name(dev));
- pci_dev_put(dev2);
- return -ENOMEM;
- }
-
- /*
- * Copy everything from a static "template" structure
- * to just allocated per-chip hpt_info structure.
- */
- memcpy(dyn_info, info, sizeof(*dyn_info));
-
- if (dev2) {
- memcpy(dyn_info + 1, info, sizeof(*dyn_info));
-
- if (info == &hpt374)
- hpt374_init(dev, dev2);
- else {
- if (hpt36x_init(dev, dev2))
- d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE;
- }
-
- ret = ide_pci_init_two(dev, dev2, &d, dyn_info);
- if (ret < 0) {
- pci_dev_put(dev2);
- kfree(dyn_info);
- }
- return ret;
- }
-
- ret = ide_pci_init_one(dev, &d, dyn_info);
- if (ret < 0)
- kfree(dyn_info);
-
- return ret;
-}
-
-static void hpt366_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct hpt_info *info = host->host_priv;
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
-
- ide_pci_remove(dev);
- pci_dev_put(dev2);
- kfree(info);
-}
-
-static const struct pci_device_id hpt366_pci_tbl[] = {
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), 3 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), 4 },
- { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), 5 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl);
-
-static struct pci_driver hpt366_pci_driver = {
- .name = "HPT366_IDE",
- .id_table = hpt366_pci_tbl,
- .probe = hpt366_init_one,
- .remove = hpt366_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init hpt366_ide_init(void)
-{
- return ide_pci_register_driver(&hpt366_pci_driver);
-}
-
-static void __exit hpt366_ide_exit(void)
-{
- pci_unregister_driver(&hpt366_pci_driver);
-}
-
-module_init(hpt366_ide_init);
-module_exit(hpt366_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for Highpoint HPT366 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
deleted file mode 100644
index 743bc3693ac8..000000000000
--- a/drivers/ide/ht6560b.c
+++ /dev/null
@@ -1,383 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-2000 Linus Torvalds & author (see below)
- */
-
-/*
- * HT-6560B EIDE-controller support
- * To activate controller support use kernel parameter "ide0=ht6560b".
- * Use hdparm utility to enable PIO mode support.
- *
- * Author: Mikko Ala-Fossi <maf@iki.fi>
- * Jan Evert van Grootheest <j.e.van.grootheest@caiway.nl>
- *
- */
-
-#define DRV_NAME "ht6560b"
-#define HT6560B_VERSION "v0.08"
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-/* #define DEBUG */ /* remove comments for DEBUG messages */
-
-/*
- * The special i/o-port that HT-6560B uses for configuration:
- * bit0 (0x01): "1" selects secondary interface
- * bit2 (0x04): "1" enables FIFO function
- * bit5 (0x20): "1" enables prefetched data read function (???)
- *
- * The special i/o-port that HT-6560A uses for configuration:
- * bit0 (0x01): "1" selects secondary interface
- * bit1 (0x02): "1" enables prefetched data read function
- * bit2 (0x04): "0" enables multi-master system (?)
- * bit3 (0x08): "1" 3 cycle time, "0" 2 cycle time (?)
- */
-#define HT_CONFIG_PORT 0x3e6
-
-static inline u8 HT_CONFIG(ide_drive_t *drive)
-{
- return ((unsigned long)ide_get_drivedata(drive) & 0xff00) >> 8;
-}
-
-/*
- * FIFO + PREFETCH (both a/b-model)
- */
-#define HT_CONFIG_DEFAULT 0x1c /* no prefetch */
-/* #define HT_CONFIG_DEFAULT 0x3c */ /* with prefetch */
-#define HT_SECONDARY_IF 0x01
-#define HT_PREFETCH_MODE 0x20
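Read against the bit key above, HT_CONFIG_DEFAULT (0x1c) leaves bit 0 clear (primary interface) and bit 5 clear (no prefetch) while setting bit 2 (FIFO enabled); the commented-out 0x3c variant additionally sets HT_PREFETCH_MODE. The tiny decode below is an editorial illustration only, not part of the removed driver (bits 3 and 4 of 0x1c are not described by the comment).

/* Editor's illustration only -- decode of the defaults above. */
#include <stdio.h>

int main(void)
{
	unsigned cfg = 0x1c;	/* HT_CONFIG_DEFAULT */

	printf("secondary=%u fifo=%u prefetch=%u\n",
	       cfg & 0x01, !!(cfg & 0x04), !!(cfg & 0x20));	/* 0 1 0 */
	return 0;
}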
-
-/*
- * ht6560b Timing values:
- *
- * I reviewed some assembler source listings of htide drivers and found
- * out how they set up those cycle time interfacing values, as Holtek
- * calls them. IDESETUP.COM, which is supplied with the drivers, figures out
- * optimal values and feeds those values to the drivers. I found out that
- * they use Select register to fetch timings to the ide board right after
- * interface switching. After that it was quite easy to add code to
- * ht6560b.c.
- *
- * IDESETUP.COM gave me values 0x24, 0x45, 0xaa, 0xff that worked fine
- * for hda and hdc. But hdb needed higher values to work, so I guess
- * that sometimes it is necessary to give higher value than IDESETUP
- * gives. [see cmd640.c for an extreme example of this. -ml]
- *
- * Perhaps I should explain something about these timing values:
- * The higher nibble of value is the Recovery Time (rt) and the lower nibble
- * of the value is the Active Time (at). Minimum value 2 is the fastest and
- * the maximum value 15 is the slowest. Default values should be 15 for both.
- * So 0x24 means 2 for rt and 4 for at. Each of the drives should have
- * both values, and IDESETUP automatically gives rt=15 st=15 for CD-ROMs or
- * similar. If the value is too small there will be all sorts of failures.
- *
- * Timing byte consists of
- * High nibble: Recovery Cycle Time (rt)
- * The valid values range from 2 to 15. The default is 15.
- *
- * Low nibble: Active Cycle Time (at)
- * The valid values range from 2 to 15. The default is 15.
- *
- * You can obtain optimized timing values by running Holtek IDESETUP.COM
- * for DOS. DOS drivers get their timing values from command line, where
- * the first value is the Recovery Time and the second value is the
- * Active Time for each drive. Smaller value gives higher speed.
- * In case of failures you should probably fall back to a higher value.
- */
-static inline u8 HT_TIMING(ide_drive_t *drive)
-{
- return (unsigned long)ide_get_drivedata(drive) & 0x00ff;
-}
-
-#define HT_TIMING_DEFAULT 0xff
-
-/*
- * This routine handles interface switching for the peculiar hardware design
- * on the F.G.I./Holtek HT-6560B VLB IDE interface.
- * The HT-6560B can only enable one IDE port at a time, and requires a
- * silly sequence (below) whenever we switch between primary and secondary.
- */
-
-/*
- * This routine is invoked from ide.c to prepare for access to a given drive.
- */
-static void ht6560b_dev_select(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
- static u8 current_select = 0;
- static u8 current_timing = 0;
- u8 select, timing;
-
- local_irq_save(flags);
-
- select = HT_CONFIG(drive);
- timing = HT_TIMING(drive);
-
- /*
- * Need to enforce prefetch sometimes because otherwise
- * it'll hang (hard).
- */
- if (drive->media != ide_disk ||
- (drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- select |= HT_PREFETCH_MODE;
-
- if (select != current_select || timing != current_timing) {
- current_select = select;
- current_timing = timing;
- (void)inb(HT_CONFIG_PORT);
- (void)inb(HT_CONFIG_PORT);
- (void)inb(HT_CONFIG_PORT);
- (void)inb(HT_CONFIG_PORT);
- outb(select, HT_CONFIG_PORT);
- /*
- * Set timing for this drive:
- */
- outb(timing, hwif->io_ports.device_addr);
- (void)inb(hwif->io_ports.status_addr);
-#ifdef DEBUG
- printk("ht6560b: %s: select=%#x timing=%#x\n",
- drive->name, select, timing);
-#endif
- }
- local_irq_restore(flags);
-
- outb(drive->select | ATA_DEVICE_OBS, hwif->io_ports.device_addr);
-}
-
-/*
- * Autodetection and initialization of ht6560b
- */
-static int __init try_to_init_ht6560b(void)
-{
- u8 orig_value;
- int i;
-
- /* Autodetect ht6560b */
- if ((orig_value = inb(HT_CONFIG_PORT)) == 0xff)
- return 0;
-
- for (i=3;i>0;i--) {
- outb(0x00, HT_CONFIG_PORT);
- if (!( (~inb(HT_CONFIG_PORT)) & 0x3f )) {
- outb(orig_value, HT_CONFIG_PORT);
- return 0;
- }
- }
- outb(0x00, HT_CONFIG_PORT);
- if ((~inb(HT_CONFIG_PORT))& 0x3f) {
- outb(orig_value, HT_CONFIG_PORT);
- return 0;
- }
- /*
- * Ht6560b autodetected
- */
- outb(HT_CONFIG_DEFAULT, HT_CONFIG_PORT);
- outb(HT_TIMING_DEFAULT, 0x1f6); /* Select register */
- (void)inb(0x1f7); /* Status register */
-
- printk("ht6560b " HT6560B_VERSION
- ": chipset detected and initialized"
-#ifdef DEBUG
- " with debug enabled"
-#endif
- "\n"
- );
- return 1;
-}
-
-static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
-{
- int active_time, recovery_time;
- int active_cycles, recovery_cycles;
- int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
-
- if (pio) {
- unsigned int cycle_time;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
-
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- /*
- * Just like opti621.c we try to calculate the
- * actual cycle time for recovery and activity
- * according to the system bus speed.
- */
- active_time = t->active;
- recovery_time = cycle_time - active_time - t->setup;
- /*
- * Cycle times should be Vesa bus cycles
- */
- active_cycles = (active_time * bus_speed + 999) / 1000;
- recovery_cycles = (recovery_time * bus_speed + 999) / 1000;
- /*
- * Upper and lower limits
- */
- if (active_cycles < 2) active_cycles = 2;
- if (recovery_cycles < 2) recovery_cycles = 2;
- if (active_cycles > 15) active_cycles = 15;
- if (recovery_cycles > 15) recovery_cycles = 0; /* 0==16 */
-
-#ifdef DEBUG
- printk("ht6560b: drive %s setting pio=%d recovery=%d (%dns) active=%d (%dns)\n", drive->name, pio, recovery_cycles, recovery_time, active_cycles, active_time);
-#endif
-
- return (u8)((recovery_cycles << 4) | active_cycles);
- } else {
-
-#ifdef DEBUG
- printk("ht6560b: drive %s setting pio=0\n", drive->name);
-#endif
-
- return HT_TIMING_DEFAULT; /* default setting */
- }
-}
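A brief editorial worked example of the ns-to-VLB-clock conversion performed above (the 50 MHz figure is the driver's fallback when ide_vlb_clk is unset; the concrete timings are invented for illustration):

/*
 * Editorial example only:
 *   bus_speed = 50 (MHz), active_time = 70 ns, recovery_time = 290 ns
 *   active_cycles   = (70  * 50 + 999) / 1000 = 4
 *   recovery_cycles = (290 * 50 + 999) / 1000 = 15
 *   timing byte     = (15 << 4) | 4 = 0xf4
 */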
-
-static DEFINE_SPINLOCK(ht6560b_lock);
-
-/*
- * Enable/Disable so called prefetch mode
- */
-static void ht_set_prefetch(ide_drive_t *drive, u8 state)
-{
- unsigned long flags, config;
- int t = HT_PREFETCH_MODE << 8;
-
- spin_lock_irqsave(&ht6560b_lock, flags);
-
- config = (unsigned long)ide_get_drivedata(drive);
-
- /*
- * Prefetch mode and unmasking the IRQ seem to conflict
- */
- if (state) {
- config |= t; /* enable prefetch mode */
- drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
- } else {
- config &= ~t; /* disable prefetch mode */
- drive->dev_flags &= ~IDE_DFLAG_NO_UNMASK;
- }
-
- ide_set_drivedata(drive, (void *)config);
-
- spin_unlock_irqrestore(&ht6560b_lock, flags);
-
-#ifdef DEBUG
- printk("ht6560b: drive %s prefetch mode %sabled\n", drive->name, (state ? "en" : "dis"));
-#endif
-}
-
-static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long flags, config;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 timing;
-
- switch (pio) {
- case 8: /* set prefetch off */
- case 9: /* set prefetch on */
- ht_set_prefetch(drive, pio & 1);
- return;
- }
-
- timing = ht_pio2timings(drive, pio);
-
- spin_lock_irqsave(&ht6560b_lock, flags);
- config = (unsigned long)ide_get_drivedata(drive);
- config &= 0xff00;
- config |= timing;
- ide_set_drivedata(drive, (void *)config);
- spin_unlock_irqrestore(&ht6560b_lock, flags);
-
-#ifdef DEBUG
- printk("ht6560b: drive %s tuned to pio mode %#x timing=%#x\n", drive->name, pio, timing);
-#endif
-}
-
-static void __init ht6560b_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- /* Setting default configurations for drives. */
- unsigned long t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT;
-
- if (hwif->channel)
- t |= (HT_SECONDARY_IF << 8);
-
- ide_set_drivedata(drive, (void *)t);
-}
-
-static bool probe_ht6560b;
-
-module_param_named(probe, probe_ht6560b, bool, 0);
-MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
-
-static const struct ide_tp_ops ht6560b_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ht6560b_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops ht6560b_port_ops = {
- .init_dev = ht6560b_init_dev,
- .set_pio_mode = ht6560b_set_pio_mode,
-};
-
-static const struct ide_port_info ht6560b_port_info __initconst = {
- .name = DRV_NAME,
- .chipset = ide_ht6560b,
- .tp_ops = &ht6560b_tp_ops,
- .port_ops = &ht6560b_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_ABUSE_PREFETCH,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init ht6560b_init(void)
-{
- if (probe_ht6560b == 0)
- return -ENODEV;
-
- if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
- printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
- __func__);
- return -ENODEV;
- }
-
- if (!try_to_init_ht6560b()) {
- printk(KERN_NOTICE "%s: HBA not found\n", __func__);
- goto release_region;
- }
-
- return ide_legacy_device_add(&ht6560b_port_info, 0);
-
-release_region:
- release_region(HT_CONFIG_PORT, 1);
- return -ENODEV;
-}
-
-module_init(ht6560b_init);
-
-MODULE_AUTHOR("See Local File");
-MODULE_DESCRIPTION("HT-6560B EIDE-controller support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
deleted file mode 100644
index 329c7e4bc9d0..000000000000
--- a/drivers/ide/icside.c
+++ /dev/null
@@ -1,692 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 1996-2004 Russell King.
- *
- * Please note that this platform does not support 32-bit IDE IO.
- */
-
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/errno.h>
-#include <linux/ide.h>
-#include <linux/dma-mapping.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/scatterlist.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/ecard.h>
-
-#define DRV_NAME "icside"
-
-#define ICS_IDENT_OFFSET 0x2280
-
-#define ICS_ARCIN_V5_INTRSTAT 0x0000
-#define ICS_ARCIN_V5_INTROFFSET 0x0004
-#define ICS_ARCIN_V5_IDEOFFSET 0x2800
-#define ICS_ARCIN_V5_IDEALTOFFSET 0x2b80
-#define ICS_ARCIN_V5_IDESTEPPING 6
-
-#define ICS_ARCIN_V6_IDEOFFSET_1 0x2000
-#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
-#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
-#define ICS_ARCIN_V6_IDEALTOFFSET_1 0x2380
-#define ICS_ARCIN_V6_IDEOFFSET_2 0x3000
-#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
-#define ICS_ARCIN_V6_INTRSTAT_2 0x3290
-#define ICS_ARCIN_V6_IDEALTOFFSET_2 0x3380
-#define ICS_ARCIN_V6_IDESTEPPING 6
-
-struct cardinfo {
- unsigned int dataoffset;
- unsigned int ctrloffset;
- unsigned int stepping;
-};
-
-static struct cardinfo icside_cardinfo_v5 = {
- .dataoffset = ICS_ARCIN_V5_IDEOFFSET,
- .ctrloffset = ICS_ARCIN_V5_IDEALTOFFSET,
- .stepping = ICS_ARCIN_V5_IDESTEPPING,
-};
-
-static struct cardinfo icside_cardinfo_v6_1 = {
- .dataoffset = ICS_ARCIN_V6_IDEOFFSET_1,
- .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_1,
- .stepping = ICS_ARCIN_V6_IDESTEPPING,
-};
-
-static struct cardinfo icside_cardinfo_v6_2 = {
- .dataoffset = ICS_ARCIN_V6_IDEOFFSET_2,
- .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_2,
- .stepping = ICS_ARCIN_V6_IDESTEPPING,
-};
-
-struct icside_state {
- unsigned int channel;
- unsigned int enabled;
- void __iomem *irq_port;
- void __iomem *ioc_base;
- unsigned int sel;
- unsigned int type;
- struct ide_host *host;
-};
-
-#define ICS_TYPE_A3IN 0
-#define ICS_TYPE_A3USER 1
-#define ICS_TYPE_V6 3
-#define ICS_TYPE_V5 15
-#define ICS_TYPE_NOTYPE ((unsigned int)-1)
-
-/* ---------------- Version 5 PCB Support Functions --------------------- */
-/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
- * Purpose : enable interrupts from card
- */
-static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
-
- writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
-}
-
-/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
- * Purpose : disable interrupts from card
- */
-static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
-
- readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
-}
-
-static const expansioncard_ops_t icside_ops_arcin_v5 = {
- .irqenable = icside_irqenable_arcin_v5,
- .irqdisable = icside_irqdisable_arcin_v5,
-};
-
-
-/* ---------------- Version 6 PCB Support Functions --------------------- */
-/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
- * Purpose : enable interrupts from card
- */
-static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
- void __iomem *base = state->irq_port;
-
- state->enabled = 1;
-
- switch (state->channel) {
- case 0:
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
- readb(base + ICS_ARCIN_V6_INTROFFSET_2);
- break;
- case 1:
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
- readb(base + ICS_ARCIN_V6_INTROFFSET_1);
- break;
- }
-}
-
-/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
- * Purpose : disable interrupts from card
- */
-static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
-{
- struct icside_state *state = ec->irq_data;
-
- state->enabled = 0;
-
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
-}
-
-/* Prototype: icside_irqprobe(struct expansion_card *ec)
- * Purpose : detect an active interrupt from card
- */
-static int icside_irqpending_arcin_v6(struct expansion_card *ec)
-{
- struct icside_state *state = ec->irq_data;
-
- return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
- readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
-}
-
-static const expansioncard_ops_t icside_ops_arcin_v6 = {
- .irqenable = icside_irqenable_arcin_v6,
- .irqdisable = icside_irqdisable_arcin_v6,
- .irqpending = icside_irqpending_arcin_v6,
-};
-
-/*
- * Handle routing of interrupts. This is called before
- * we write the command to the drive.
- */
-static void icside_maskproc(ide_drive_t *drive, int mask)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned long flags;
-
- local_irq_save(flags);
-
- state->channel = hwif->channel;
-
- if (state->enabled && !mask) {
- switch (hwif->channel) {
- case 0:
- writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- break;
- case 1:
- writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- break;
- }
- } else {
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
- readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
- }
-
- local_irq_restore(flags);
-}
-
-static const struct ide_port_ops icside_v6_no_dma_port_ops = {
- .maskproc = icside_maskproc,
-};
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
-/*
- * SG-DMA support.
- *
- * Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
- * There is only one DMA controller per card, which means that only
- * one drive can be accessed at one time. NOTE! We do not enforce that
- * here, but we rely on the main IDE driver spotting that both
- * interfaces use the same IRQ, which should guarantee this.
- */
-
-/*
- * Configure the IOMD to give the appropriate timings for the transfer
- * mode being requested. We take the advice of the ATA standards, and
- * calculate the cycle time based on the transfer mode, and the EIDE
- * MW DMA specs that the drive provides in the IDENTIFY command.
- *
- * We have the following IOMD DMA modes to choose from:
- *
- * Type Active Recovery Cycle
- * A 250 (250) 312 (550) 562 (800)
- * B 187 250 437
- * C 125 (125) 125 (375) 250 (500)
- * D 62 125 187
- *
- * (figures in brackets are actual measured timings)
- *
- * However, we also need to take care of the read/write active and
- * recovery timings:
- *
- * Read Write
- * Mode Active -- Recovery -- Cycle IOMD type
- * MW0 215 50 215 480 A
- * MW1 80 50 50 150 C
- * MW2 70 25 25 120 C
- */
-static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long cycle_time = 0;
- int use_dma_info = 0;
- const u8 xfer_mode = drive->dma_mode;
-
- switch (xfer_mode) {
- case XFER_MW_DMA_2:
- cycle_time = 250;
- use_dma_info = 1;
- break;
-
- case XFER_MW_DMA_1:
- cycle_time = 250;
- use_dma_info = 1;
- break;
-
- case XFER_MW_DMA_0:
- cycle_time = 480;
- break;
-
- case XFER_SW_DMA_2:
- case XFER_SW_DMA_1:
- case XFER_SW_DMA_0:
- cycle_time = 480;
- break;
- }
-
- /*
- * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
- * take care to note the values in the ID...
- */
- if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
- cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];
-
- ide_set_drivedata(drive, (void *)cycle_time);
-
- printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
- drive->name, ide_xfer_verbose(xfer_mode),
- 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
-}
-
-static const struct ide_port_ops icside_v6_port_ops = {
- .set_dma_mode = icside_set_dma_mode,
- .maskproc = icside_maskproc,
-};
-
-static void icside_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static int icside_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
-
- disable_dma(ec->dma);
-
- return get_dma_residue(ec->dma) != 0;
-}
-
-static void icside_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
-
- /* We can not enable DMA on both channels simultaneously. */
- BUG_ON(dma_channel_active(ec->dma));
- enable_dma(ec->dma);
-}
-
-static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned int dma_mode;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- dma_mode = DMA_MODE_WRITE;
- else
- dma_mode = DMA_MODE_READ;
-
- /*
- * We can not enable DMA on both channels.
- */
- BUG_ON(dma_channel_active(ec->dma));
-
- /*
- * Ensure that we have the right interrupt routed.
- */
- icside_maskproc(drive, 0);
-
- /*
- * Route the DMA signals to the correct interface.
- */
- writeb(state->sel | hwif->channel, state->ioc_base);
-
- /*
- * Select the correct timing for this drive.
- */
- set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));
-
- /*
- * Tell the DMA engine about the SG table and
- * data direction.
- */
- set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
- set_dma_mode(ec->dma, dma_mode);
-
- return 0;
-}
-
-static int icside_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct expansion_card *ec = ECARD_DEV(hwif->dev);
- struct icside_state *state = ecard_get_drvdata(ec);
-
- return readb(state->irq_port +
- (hwif->channel ?
- ICS_ARCIN_V6_INTRSTAT_2 :
- ICS_ARCIN_V6_INTRSTAT_1)) & 1;
-}
-
-static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- hwif->dmatable_cpu = NULL;
- hwif->dmatable_dma = 0;
-
- return 0;
-}
-
-static const struct ide_dma_ops icside_v6_dma_ops = {
- .dma_host_set = icside_dma_host_set,
- .dma_setup = icside_dma_setup,
- .dma_start = icside_dma_start,
- .dma_end = icside_dma_end,
- .dma_test_irq = icside_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
-};
-#endif
-
-static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- return -EOPNOTSUPP;
-}
-
-static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
- struct cardinfo *info, struct expansion_card *ec)
-{
- unsigned long port = (unsigned long)base + info->dataoffset;
-
- hw->io_ports.data_addr = port;
- hw->io_ports.error_addr = port + (1 << info->stepping);
- hw->io_ports.nsect_addr = port + (2 << info->stepping);
- hw->io_ports.lbal_addr = port + (3 << info->stepping);
- hw->io_ports.lbam_addr = port + (4 << info->stepping);
- hw->io_ports.lbah_addr = port + (5 << info->stepping);
- hw->io_ports.device_addr = port + (6 << info->stepping);
- hw->io_ports.status_addr = port + (7 << info->stepping);
- hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset;
-
- hw->irq = ec->irq;
- hw->dev = &ec->dev;
-}
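For orientation, an editorial note on the register layout built above (not from the original source): with the stepping of 6 used by both the V5 and V6 cards, successive taskfile registers are spaced 1 << 6 = 0x40 bytes apart.

/*
 * Editorial illustration of the layout produced by icside_setup_ports()
 * with stepping == 6:
 *   data    at base + dataoffset + (0 << 6)   (+0x000)
 *   nsect   at base + dataoffset + (2 << 6)   (+0x080)
 *   status  at base + dataoffset + (7 << 6)   (+0x1c0)
 *   ctl     at base + ctrloffset
 */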
-
-static const struct ide_port_info icside_v5_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_acorn,
-};
-
-static int icside_register_v5(struct icside_state *state,
- struct expansion_card *ec)
-{
- void __iomem *base;
- struct ide_host *host;
- struct ide_hw hw, *hws[] = { &hw };
- int ret;
-
- base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
- if (!base)
- return -ENOMEM;
-
- state->irq_port = base;
-
- ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
- ec->irqmask = 1;
-
- ecard_setirq(ec, &icside_ops_arcin_v5, state);
-
- /*
- * Be on the safe side - disable interrupts
- */
- icside_irqdisable_arcin_v5(ec, 0);
-
- icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
-
- host = ide_host_alloc(&icside_v5_port_info, hws, 1);
- if (host == NULL)
- return -ENODEV;
-
- state->host = host;
-
- ecard_set_drvdata(ec, state);
-
- ret = ide_host_register(host, &icside_v5_port_info, hws);
- if (ret)
- goto err_free;
-
- return 0;
-err_free:
- ide_host_free(host);
- ecard_set_drvdata(ec, NULL);
- return ret;
-}
-
-static const struct ide_port_info icside_v6_port_info = {
- .init_dma = icside_dma_off_init,
- .port_ops = &icside_v6_no_dma_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
- .mwdma_mask = ATA_MWDMA2,
- .swdma_mask = ATA_SWDMA2,
- .chipset = ide_acorn,
-};
-
-static int icside_register_v6(struct icside_state *state,
- struct expansion_card *ec)
-{
- void __iomem *ioc_base, *easi_base;
- struct ide_host *host;
- unsigned int sel = 0;
- int ret;
- struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
- struct ide_port_info d = icside_v6_port_info;
-
- ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (!ioc_base) {
- ret = -ENOMEM;
- goto out;
- }
-
- easi_base = ioc_base;
-
- if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
- easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
- if (!easi_base) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Enable access to the EASI region.
- */
- sel = 1 << 5;
- }
-
- writeb(sel, ioc_base);
-
- ecard_setirq(ec, &icside_ops_arcin_v6, state);
-
- state->irq_port = easi_base;
- state->ioc_base = ioc_base;
- state->sel = sel;
-
- /*
- * Be on the safe side - disable interrupts
- */
- icside_irqdisable_arcin_v6(ec, 0);
-
- icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
- icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
-
- host = ide_host_alloc(&d, hws, 2);
- if (host == NULL)
- return -ENODEV;
-
- state->host = host;
-
- ecard_set_drvdata(ec, state);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
- if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
- d.init_dma = icside_dma_init;
- d.port_ops = &icside_v6_port_ops;
- d.dma_ops = &icside_v6_dma_ops;
- }
-#endif
-
- ret = ide_host_register(host, &d, hws);
- if (ret)
- goto err_free;
-
- return 0;
-err_free:
- ide_host_free(host);
- if (d.dma_ops)
- free_dma(ec->dma);
- ecard_set_drvdata(ec, NULL);
-out:
- return ret;
-}
-
-static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
-{
- struct icside_state *state;
- void __iomem *idmem;
- int ret;
-
- ret = ecard_request_resources(ec);
- if (ret)
- goto out;
-
- state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
- if (!state) {
- ret = -ENOMEM;
- goto release;
- }
-
- state->type = ICS_TYPE_NOTYPE;
-
- idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (idmem) {
- unsigned int type;
-
- type = readb(idmem + ICS_IDENT_OFFSET) & 1;
- type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
- type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
- type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
- ecardm_iounmap(ec, idmem);
-
- state->type = type;
- }
-
- switch (state->type) {
- case ICS_TYPE_A3IN:
- dev_warn(&ec->dev, "A3IN unsupported\n");
- ret = -ENODEV;
- break;
-
- case ICS_TYPE_A3USER:
- dev_warn(&ec->dev, "A3USER unsupported\n");
- ret = -ENODEV;
- break;
-
- case ICS_TYPE_V5:
- ret = icside_register_v5(state, ec);
- break;
-
- case ICS_TYPE_V6:
- ret = icside_register_v6(state, ec);
- break;
-
- default:
- dev_warn(&ec->dev, "unknown interface type\n");
- ret = -ENODEV;
- break;
- }
-
- if (ret == 0)
- goto out;
-
- kfree(state);
- release:
- ecard_release_resources(ec);
- out:
- return ret;
-}
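As an editorial aside (not part of the original file), the card type probed above is assembled from four ident bits read 4 bytes apart, which is how the ICS_TYPE_* values come about:

/*
 * Editorial example of the ident-bit assembly in icside_probe():
 *   type = bit0 | (bit1 << 1) | (bit2 << 2) | (bit3 << 3)
 *   all four bits set         -> type = 15 = ICS_TYPE_V5
 *   only the low two bits set -> type =  3 = ICS_TYPE_V6
 */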
-
-static void icside_remove(struct expansion_card *ec)
-{
- struct icside_state *state = ecard_get_drvdata(ec);
-
- switch (state->type) {
- case ICS_TYPE_V5:
- /* FIXME: tell IDE to stop using the interface */
-
- /* Disable interrupts */
- icside_irqdisable_arcin_v5(ec, 0);
- break;
-
- case ICS_TYPE_V6:
- /* FIXME: tell IDE to stop using the interface */
- if (ec->dma != NO_DMA)
- free_dma(ec->dma);
-
- /* Disable interrupts */
- icside_irqdisable_arcin_v6(ec, 0);
-
- /* Reset the ROM pointer/EASI selection */
- writeb(0, state->ioc_base);
- break;
- }
-
- ecard_set_drvdata(ec, NULL);
-
- kfree(state);
- ecard_release_resources(ec);
-}
-
-static void icside_shutdown(struct expansion_card *ec)
-{
- struct icside_state *state = ecard_get_drvdata(ec);
- unsigned long flags;
-
- /*
- * Disable interrupts from this card. We need to do
- * this before disabling EASI since we may be accessing
- * this register via that region.
- */
- local_irq_save(flags);
- ec->ops->irqdisable(ec, 0);
- local_irq_restore(flags);
-
- /*
- * Reset the ROM pointer so that we can read the ROM
- * after a soft reboot. This also disables access to
- * the IDE taskfile via the EASI region.
- */
- if (state->ioc_base)
- writeb(0, state->ioc_base);
-}
-
-static const struct ecard_id icside_ids[] = {
- { MANU_ICS, PROD_ICS_IDE },
- { MANU_ICS2, PROD_ICS2_IDE },
- { 0xffff, 0xffff }
-};
-
-static struct ecard_driver icside_driver = {
- .probe = icside_probe,
- .remove = icside_remove,
- .shutdown = icside_shutdown,
- .id_table = icside_ids,
- .drv = {
- .name = "icside",
- },
-};
-
-static int __init icside_init(void)
-{
- return ecard_register_driver(&icside_driver);
-}
-
-static void __exit icside_exit(void)
-{
- ecard_remove_driver(&icside_driver);
-}
-
-MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ICS IDE driver");
-
-module_init(icside_init);
-module_exit(icside_exit);
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
deleted file mode 100644
index 06c6215e0cbe..000000000000
--- a/drivers/ide/ide-4drives.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "ide-4drives"
-
-static bool probe_4drives;
-
-module_param_named(probe, probe_4drives, bool, 0);
-MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
-
-static void ide_4drives_init_dev(ide_drive_t *drive)
-{
- if (drive->hwif->channel)
- drive->select ^= 0x20;
-}
-
-static const struct ide_port_ops ide_4drives_port_ops = {
- .init_dev = ide_4drives_init_dev,
-};
-
-static const struct ide_port_info ide_4drives_port_info = {
- .port_ops = &ide_4drives_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA |
- IDE_HFLAG_4DRIVES,
- .chipset = ide_4drives,
-};
-
-static int __init ide_4drives_init(void)
-{
- unsigned long base = 0x1f0, ctl = 0x3f6;
- struct ide_hw hw, *hws[] = { &hw, &hw };
-
- if (probe_4drives == 0)
- return -ENODEV;
-
- if (!request_region(base, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, base, base + 7);
- return -EBUSY;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(base, 8);
- return -EBUSY;
- }
-
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw, base, ctl);
- hw.irq = 14;
-
- return ide_host_add(&ide_4drives_port_info, hws, 2, NULL);
-}
-
-module_init(ide_4drives_init);
-
-MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("generic IDE chipset with 4 drives/port support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
deleted file mode 100644
index 05e18d658141..000000000000
--- a/drivers/ide/ide-acpi.c
+++ /dev/null
@@ -1,622 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Provides ACPI support for IDE drives.
- *
- * Copyright (C) 2005 Intel Corp.
- * Copyright (C) 2005 Randy Dunlap
- * Copyright (C) 2006 SUSE Linux Products GmbH
- * Copyright (C) 2006 Hannes Reinecke
- */
-
-#include <linux/acpi.h>
-#include <linux/ata.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ide.h>
-#include <linux/pci.h>
-#include <linux/dmi.h>
-#include <linux/module.h>
-
-#define REGS_PER_GTF 7
-
-struct GTM_buffer {
- u32 PIO_speed0;
- u32 DMA_speed0;
- u32 PIO_speed1;
- u32 DMA_speed1;
- u32 GTM_flags;
-};
-
-struct ide_acpi_drive_link {
- acpi_handle obj_handle;
- u8 idbuff[512];
-};
-
-struct ide_acpi_hwif_link {
- ide_hwif_t *hwif;
- acpi_handle obj_handle;
- struct GTM_buffer gtm;
- struct ide_acpi_drive_link master;
- struct ide_acpi_drive_link slave;
-};
-
-#undef DEBUGGING
-/* note: adds function name and KERN_DEBUG */
-#ifdef DEBUGGING
-#define DEBPRINT(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
-#else
-#define DEBPRINT(fmt, args...) do {} while (0)
-#endif /* DEBUGGING */
-
-static bool ide_noacpi;
-module_param_named(noacpi, ide_noacpi, bool, 0);
-MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
-
-static bool ide_acpigtf;
-module_param_named(acpigtf, ide_acpigtf, bool, 0);
-MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
-
-static bool ide_acpionboot;
-module_param_named(acpionboot, ide_acpionboot, bool, 0);
-MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
-
-static bool ide_noacpi_psx;
-static int no_acpi_psx(const struct dmi_system_id *id)
-{
- ide_noacpi_psx = true;
- printk(KERN_NOTICE"%s detected - disable ACPI _PSx.\n", id->ident);
- return 0;
-}
-
-static const struct dmi_system_id ide_acpi_dmi_table[] = {
- /* Bug 9673. */
- /* We should check if this is because ACPI NVS isn't save/restored. */
- {
- .callback = no_acpi_psx,
- .ident = "HP nx9005",
- .matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies Ltd."),
- DMI_MATCH(DMI_BIOS_VERSION, "KAM1.60")
- },
- },
-
- { } /* terminate list */
-};
-
-int ide_acpi_init(void)
-{
- dmi_check_system(ide_acpi_dmi_table);
- return 0;
-}
-
-bool ide_port_acpi(ide_hwif_t *hwif)
-{
- return ide_noacpi == 0 && hwif->acpidata;
-}
-
-static acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
-{
- struct acpi_device *adev;
-
- if (!handle || acpi_bus_get_device(handle, &adev))
- return NULL;
-
- adev = acpi_find_child_device(adev, addr, false);
- return adev ? adev->handle : NULL;
-}
-
-/**
- * ide_get_dev_handle - finds acpi_handle and PCI device.function
- * @dev: device to locate
- * @handle: returned acpi_handle for @dev
- * @pcidevfn: return PCI device.func for @dev
- *
- * Returns the ACPI object handle to the corresponding PCI device.
- *
- * Returns 0 on success, <0 on error.
- */
-static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
- u64 *pcidevfn)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- unsigned int bus, devnum, func;
- u64 addr;
- acpi_handle dev_handle;
- acpi_status status;
- struct acpi_device_info *dinfo = NULL;
- int ret = -ENODEV;
-
- bus = pdev->bus->number;
- devnum = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- /* ACPI _ADR encoding for PCI bus: */
- addr = (u64)(devnum << 16 | func);
-
- DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
-
- dev_handle = ACPI_HANDLE(dev);
- if (!dev_handle) {
- DEBPRINT("no acpi handle for device\n");
- goto err;
- }
-
- status = acpi_get_object_info(dev_handle, &dinfo);
- if (ACPI_FAILURE(status)) {
- DEBPRINT("get_object_info for device failed\n");
- goto err;
- }
- if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
- dinfo->address == addr) {
- *pcidevfn = addr;
- *handle = dev_handle;
- } else {
- DEBPRINT("get_object_info for device has wrong "
- " address: %llu, should be %u\n",
- dinfo ? (unsigned long long)dinfo->address : -1ULL,
- (unsigned int)addr);
- goto err;
- }
-
- DEBPRINT("for dev=0x%x.%x, addr=0x%llx, *handle=0x%p\n",
- devnum, func, (unsigned long long)addr, *handle);
- ret = 0;
-err:
- kfree(dinfo);
- return ret;
-}
-
-/**
- * ide_acpi_hwif_get_handle - Get ACPI object handle for a given hwif
- * @hwif: device to locate
- *
- * Retrieves the object handle for a given hwif.
- *
- * Returns handle on success, 0 on error.
- */
-static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
-{
- struct device *dev = hwif->gendev.parent;
- acpi_handle dev_handle;
- u64 pcidevfn;
- acpi_handle chan_handle;
- int err;
-
- DEBPRINT("ENTER: device %s\n", hwif->name);
-
- if (!dev) {
- DEBPRINT("no PCI device for %s\n", hwif->name);
- return NULL;
- }
-
- err = ide_get_dev_handle(dev, &dev_handle, &pcidevfn);
- if (err < 0) {
- DEBPRINT("ide_get_dev_handle failed (%d)\n", err);
- return NULL;
- }
-
- /* get child objects of dev_handle == channel objects,
- * + _their_ children == drive objects */
- /* channel is hwif->channel */
- chan_handle = acpi_get_child(dev_handle, hwif->channel);
- DEBPRINT("chan adr=%d: handle=0x%p\n",
- hwif->channel, chan_handle);
-
- return chan_handle;
-}
-
-/**
- * do_drive_get_GTF - get the drive bootup default taskfile settings
- * @drive: the drive for which the taskfile settings should be retrieved
- * @gtf_length: number of bytes of _GTF data returned at @gtf_address
- * @gtf_address: buffer containing _GTF taskfile arrays
- *
- * The _GTF method has no input parameters.
- * It returns a variable number of register set values (registers
- * hex 1F1..1F7, taskfiles).
- * The <variable number> is not known in advance, so have ACPI-CA
- * allocate the buffer as needed and return it, then free it later.
- *
- * The returned @gtf_length and @gtf_address are only valid if the
- * function return value is 0.
- */
-static int do_drive_get_GTF(ide_drive_t *drive,
- unsigned int *gtf_length, unsigned long *gtf_address,
- unsigned long *obj_loc)
-{
- acpi_status status;
- struct acpi_buffer output;
- union acpi_object *out_obj;
- int err = -ENODEV;
-
- *gtf_length = 0;
- *gtf_address = 0UL;
- *obj_loc = 0UL;
-
- if (!drive->acpidata->obj_handle) {
- DEBPRINT("No ACPI object found for %s\n", drive->name);
- goto out;
- }
-
- /* Setting up output buffer */
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
-
- /* _GTF has no input parameters */
- err = -EIO;
- status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF",
- NULL, &output);
- if (ACPI_FAILURE(status)) {
- printk(KERN_DEBUG
- "%s: Run _GTF error: status = 0x%x\n",
- __func__, status);
- goto out;
- }
-
- if (!output.length || !output.pointer) {
- DEBPRINT("Run _GTF: "
- "length or ptr is NULL (0x%llx, 0x%p)\n",
- (unsigned long long)output.length,
- output.pointer);
- goto out;
- }
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- DEBPRINT("Run _GTF: error: "
- "expected object type of ACPI_TYPE_BUFFER, "
- "got 0x%x\n", out_obj->type);
- err = -ENOENT;
- kfree(output.pointer);
- goto out;
- }
-
- if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
- out_obj->buffer.length % REGS_PER_GTF) {
- printk(KERN_ERR
- "%s: unexpected GTF length (%d) or addr (0x%p)\n",
- __func__, out_obj->buffer.length,
- out_obj->buffer.pointer);
- err = -ENOENT;
- kfree(output.pointer);
- goto out;
- }
-
- *gtf_length = out_obj->buffer.length;
- *gtf_address = (unsigned long)out_obj->buffer.pointer;
- *obj_loc = (unsigned long)out_obj;
- DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n",
- *gtf_length, *gtf_address, *obj_loc);
- err = 0;
-out:
- return err;
-}
-
-/**
- * do_drive_set_taskfiles - write the drive taskfile settings from _GTF
- * @drive: the drive to which the taskfile command should be sent
- * @gtf_length: total number of bytes of _GTF taskfiles
- * @gtf_address: location of _GTF taskfile arrays
- *
- * Write {gtf_address, length gtf_length} in groups of
- * REGS_PER_GTF bytes.
- */
-static int do_drive_set_taskfiles(ide_drive_t *drive,
- unsigned int gtf_length,
- unsigned long gtf_address)
-{
- int rc = 0, err;
- int gtf_count = gtf_length / REGS_PER_GTF;
- int ix;
-
- DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n",
- gtf_length, gtf_length, gtf_count, gtf_address);
-
- /* send all taskfile registers (0x1f1-0x1f7) *in*that*order* */
- for (ix = 0; ix < gtf_count; ix++) {
- u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF);
- struct ide_cmd cmd;
-
- DEBPRINT("(0x1f1-1f7): "
- "hex: %02x %02x %02x %02x %02x %02x %02x\n",
- gtf[0], gtf[1], gtf[2],
- gtf[3], gtf[4], gtf[5], gtf[6]);
-
- if (!ide_acpigtf) {
- DEBPRINT("_GTF execution disabled\n");
- continue;
- }
-
- /* convert GTF to taskfile */
- memset(&cmd, 0, sizeof(cmd));
- memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF);
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- err = ide_no_data_taskfile(drive, &cmd);
- if (err) {
- printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
- __func__, err);
- rc = err;
- }
- }
-
- return rc;
-}
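A brief editorial example of the _GTF buffer layout consumed above (illustrative only, not from the original source):

/*
 * Editorial example: a 14-byte _GTF buffer holds
 *   gtf_count = 14 / REGS_PER_GTF = 2 register sets, sent in order:
 *   set 0: gtf[0..6]  -> registers 0x1f1..0x1f7 (feature .. command)
 *   set 1: gtf[7..13] -> registers 0x1f1..0x1f7 (feature .. command)
 */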
-
-/**
- * ide_acpi_exec_tfs - get then write drive taskfile settings
- * @drive: the drive for which the taskfile settings should be
- * written.
- *
- * According to the ACPI spec this should be called after _STM
- * has been evaluated for the interface. Some ACPI vendors interpret
- * that as a hard requirement and modify the taskfile according
- * to the Identify Drive information passed down with _STM.
- * So one should really make sure to call this only after _STM has
- * been executed.
- */
-int ide_acpi_exec_tfs(ide_drive_t *drive)
-{
- int ret;
- unsigned int gtf_length;
- unsigned long gtf_address;
- unsigned long obj_loc;
-
- DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
-
- ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
- if (ret < 0) {
- DEBPRINT("get_GTF error (%d)\n", ret);
- return ret;
- }
-
- DEBPRINT("call set_taskfiles, drive=%s\n", drive->name);
-
- ret = do_drive_set_taskfiles(drive, gtf_length, gtf_address);
- kfree((void *)obj_loc);
- if (ret < 0) {
- DEBPRINT("set_taskfiles error (%d)\n", ret);
- }
-
- DEBPRINT("ret=%d\n", ret);
-
- return ret;
-}
-
-/**
- * ide_acpi_get_timing - get the channel (controller) timings
- * @hwif: target IDE interface (channel)
- *
- * This function executes the _GTM ACPI method for the target channel.
- *
- */
-void ide_acpi_get_timing(ide_hwif_t *hwif)
-{
- acpi_status status;
- struct acpi_buffer output;
- union acpi_object *out_obj;
-
- /* Setting up output buffer for _GTM */
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
-
- /* _GTM has no input parameters */
- status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_GTM",
- NULL, &output);
-
- DEBPRINT("_GTM status: %d, outptr: 0x%p, outlen: 0x%llx\n",
- status, output.pointer,
- (unsigned long long)output.length);
-
- if (ACPI_FAILURE(status)) {
- DEBPRINT("Run _GTM error: status = 0x%x\n", status);
- return;
- }
-
- if (!output.length || !output.pointer) {
- DEBPRINT("Run _GTM: length or ptr is NULL (0x%llx, 0x%p)\n",
- (unsigned long long)output.length,
- output.pointer);
- kfree(output.pointer);
- return;
- }
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- DEBPRINT("Run _GTM: error: "
- "expected object type of ACPI_TYPE_BUFFER, "
- "got 0x%x\n", out_obj->type);
- kfree(output.pointer);
- return;
- }
-
- if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
- out_obj->buffer.length != sizeof(struct GTM_buffer)) {
- printk(KERN_ERR
- "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
- "addr (0x%p)\n",
- __func__, out_obj->buffer.length,
- sizeof(struct GTM_buffer), out_obj->buffer.pointer);
- kfree(output.pointer);
- return;
- }
-
- memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer,
- sizeof(struct GTM_buffer));
-
- DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%zx\n",
- out_obj->buffer.pointer, out_obj->buffer.length,
- sizeof(struct GTM_buffer));
-
- DEBPRINT("_GTM fields: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
- hwif->acpidata->gtm.PIO_speed0,
- hwif->acpidata->gtm.DMA_speed0,
- hwif->acpidata->gtm.PIO_speed1,
- hwif->acpidata->gtm.DMA_speed1,
- hwif->acpidata->gtm.GTM_flags);
-
- kfree(output.pointer);
-}
-
-/**
- * ide_acpi_push_timing - set the channel (controller) timings
- * @hwif: target IDE interface (channel)
- *
- * This function executes the _STM ACPI method for the target channel.
- *
- * _STM requires Identify Drive data, which has to be passed as an argument.
- * Unfortunately drive->id is a mangled version which we can't readily
- * use; hence we'll get the information afresh.
- */
-void ide_acpi_push_timing(ide_hwif_t *hwif)
-{
- acpi_status status;
- struct acpi_object_list input;
- union acpi_object in_params[3];
- struct ide_acpi_drive_link *master = &hwif->acpidata->master;
- struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
-
- /* Give the GTM buffer + drive Identify data to the channel via the
- * _STM method: */
- /* setup input parameters buffer for _STM */
- input.count = 3;
- input.pointer = in_params;
- in_params[0].type = ACPI_TYPE_BUFFER;
- in_params[0].buffer.length = sizeof(struct GTM_buffer);
- in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
- in_params[1].type = ACPI_TYPE_BUFFER;
- in_params[1].buffer.length = ATA_ID_WORDS * 2;
- in_params[1].buffer.pointer = (u8 *)&master->idbuff;
- in_params[2].type = ACPI_TYPE_BUFFER;
- in_params[2].buffer.length = ATA_ID_WORDS * 2;
- in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
- /* Output buffer: _STM has no output */
-
- status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_STM",
- &input, NULL);
-
- if (ACPI_FAILURE(status)) {
- DEBPRINT("Run _STM error: status = 0x%x\n", status);
- }
- DEBPRINT("_STM status: %d\n", status);
-}
-
-/**
- * ide_acpi_set_state - set the channel power state
- * @hwif: target IDE interface
- * @on: state, on/off
- *
- * This function executes the _PS0/_PS3 ACPI method to set the power state.
- * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
- */
-void ide_acpi_set_state(ide_hwif_t *hwif, int on)
-{
- ide_drive_t *drive;
- int i;
-
- if (ide_noacpi_psx)
- return;
-
- DEBPRINT("ENTER:\n");
-
- /* channel first and then drives for power on, and vice versa for power off */
- if (on)
- acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- if (drive->acpidata->obj_handle)
- acpi_bus_set_power(drive->acpidata->obj_handle,
- on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
- }
-
- if (!on)
- acpi_bus_set_power(hwif->acpidata->obj_handle,
- ACPI_STATE_D3_COLD);
-}
-
-/**
- * ide_acpi_init_port - initialize the ACPI link for an IDE interface
- * @hwif: target IDE interface (channel)
- *
- * The ACPI spec is not quite clear about when the drive identify buffer
- * should be obtained. Calling IDENTIFY DEVICE during shutdown
- * is not the best of ideas as the drive might already have been put to
- * sleep. And obviously we can't call it during resume.
- * So we get the information during startup; but this means that
- * any changes during run-time will be lost after resume.
- */
-void ide_acpi_init_port(ide_hwif_t *hwif)
-{
- hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL);
- if (!hwif->acpidata)
- return;
-
- hwif->acpidata->obj_handle = ide_acpi_hwif_get_handle(hwif);
- if (!hwif->acpidata->obj_handle) {
- DEBPRINT("no ACPI object for %s found\n", hwif->name);
- kfree(hwif->acpidata);
- hwif->acpidata = NULL;
- }
-}
-
-void ide_acpi_port_init_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i, err;
-
- if (hwif->acpidata == NULL)
- return;
-
- /*
- * The ACPI spec mandates that we send information
- * for both drives, regardless of whether they are connected
- * or not.
- */
- hwif->devices[0]->acpidata = &hwif->acpidata->master;
- hwif->devices[1]->acpidata = &hwif->acpidata->slave;
-
- /* get _ADR info for each device */
- ide_port_for_each_present_dev(i, drive, hwif) {
- acpi_handle dev_handle;
-
- DEBPRINT("ENTER: %s at channel#: %d port#: %d\n",
- drive->name, hwif->channel, drive->dn & 1);
-
- /* TBD: could also check ACPI object VALID bits */
- dev_handle = acpi_get_child(hwif->acpidata->obj_handle,
- drive->dn & 1);
-
- DEBPRINT("drive %s handle 0x%p\n", drive->name, dev_handle);
-
- drive->acpidata->obj_handle = dev_handle;
- }
-
- /* send IDENTIFY for each device */
- ide_port_for_each_present_dev(i, drive, hwif) {
- err = taskfile_lib_get_identify(drive, drive->acpidata->idbuff);
- if (err)
- DEBPRINT("identify device %s failed (%d)\n",
- drive->name, err);
- }
-
- if (ide_noacpi || ide_acpionboot == 0) {
- DEBPRINT("ACPI methods disabled on boot\n");
- return;
- }
-
- /* ACPI _PS0 before _STM */
- ide_acpi_set_state(hwif, 1);
- /*
- * ACPI requires us to call _STM on startup
- */
- ide_acpi_get_timing(hwif);
- ide_acpi_push_timing(hwif);
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- ide_acpi_exec_tfs(drive);
- }
-}
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
deleted file mode 100644
index a1ce9f5ac3aa..000000000000
--- a/drivers/ide/ide-atapi.c
+++ /dev/null
@@ -1,756 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ATAPI support.
- */
-
-#include <linux/kernel.h>
-#include <linux/cdrom.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-#include <linux/gfp.h>
-
-#include <scsi/scsi.h>
-
-#define DRV_NAME "ide-atapi"
-#define PFX DRV_NAME ": "
-
-#ifdef DEBUG
-#define debug_log(fmt, args...) \
- printk(KERN_INFO "ide: " fmt, ## args)
-#else
-#define debug_log(fmt, args...) do {} while (0)
-#endif
-
-#define ATAPI_MIN_CDB_BYTES 12
-
-static inline int dev_is_idecd(ide_drive_t *drive)
-{
- return drive->media == ide_cdrom || drive->media == ide_optical;
-}
-
-/*
- * Check whether we can support a device,
- * based on the ATAPI IDENTIFY command results.
- */
-int ide_check_atapi_device(ide_drive_t *drive, const char *s)
-{
- u16 *id = drive->id;
- u8 gcw[2], protocol, device_type, removable, drq_type, packet_size;
-
- *((u16 *)&gcw) = id[ATA_ID_CONFIG];
-
- protocol = (gcw[1] & 0xC0) >> 6;
- device_type = gcw[1] & 0x1F;
- removable = (gcw[0] & 0x80) >> 7;
- drq_type = (gcw[0] & 0x60) >> 5;
- packet_size = gcw[0] & 0x03;
-
-#ifdef CONFIG_PPC
- /* kludge for Apple PowerBook internal zip */
- if (drive->media == ide_floppy && device_type == 5 &&
- !strstr((char *)&id[ATA_ID_PROD], "CD-ROM") &&
- strstr((char *)&id[ATA_ID_PROD], "ZIP"))
- device_type = 0;
-#endif
-
- if (protocol != 2)
- printk(KERN_ERR "%s: %s: protocol (0x%02x) is not ATAPI\n",
- s, drive->name, protocol);
- else if ((drive->media == ide_floppy && device_type != 0) ||
- (drive->media == ide_tape && device_type != 1))
- printk(KERN_ERR "%s: %s: invalid device type (0x%02x)\n",
- s, drive->name, device_type);
- else if (removable == 0)
- printk(KERN_ERR "%s: %s: the removable flag is not set\n",
- s, drive->name);
- else if (drive->media == ide_floppy && drq_type == 3)
- printk(KERN_ERR "%s: %s: sorry, DRQ type (0x%02x) not "
- "supported\n", s, drive->name, drq_type);
- else if (packet_size != 0)
- printk(KERN_ERR "%s: %s: packet size (0x%02x) is not 12 "
- "bytes\n", s, drive->name, packet_size);
- else
- return 1;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_check_atapi_device);
-
-void ide_init_pc(struct ide_atapi_pc *pc)
-{
- memset(pc, 0, sizeof(*pc));
-}
-EXPORT_SYMBOL_GPL(ide_init_pc);
-
-/*
- * Add a special packet command request to the tail of the request queue,
- * and wait for it to be serviced.
- */
-int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
- struct ide_atapi_pc *pc, void *buf, unsigned int bufflen)
-{
- struct request *rq;
- int error;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- ide_req(rq)->special = pc;
-
- if (buf && bufflen) {
- error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
- GFP_NOIO);
- if (error)
- goto put_req;
- }
-
- memcpy(scsi_req(rq)->cmd, pc->c, 12);
- if (drive->media == ide_tape)
- scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
- blk_execute_rq(disk, rq, 0);
- error = scsi_req(rq)->result ? -EIO : 0;
-put_req:
- blk_put_request(rq);
- return error;
-}
-EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
-
-int ide_do_test_unit_ready(ide_drive_t *drive, struct gendisk *disk)
-{
- struct ide_atapi_pc pc;
-
- ide_init_pc(&pc);
- pc.c[0] = TEST_UNIT_READY;
-
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_do_test_unit_ready);
-
-int ide_do_start_stop(ide_drive_t *drive, struct gendisk *disk, int start)
-{
- struct ide_atapi_pc pc;
-
- ide_init_pc(&pc);
- pc.c[0] = START_STOP;
- pc.c[4] = start;
-
- if (drive->media == ide_tape)
- pc.flags |= PC_FLAG_WAIT_FOR_DSC;
-
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_do_start_stop);
-
-int ide_set_media_lock(ide_drive_t *drive, struct gendisk *disk, int on)
-{
- struct ide_atapi_pc pc;
-
- if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
- return 0;
-
- ide_init_pc(&pc);
- pc.c[0] = ALLOW_MEDIUM_REMOVAL;
- pc.c[4] = on;
-
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_set_media_lock);
-
-void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = REQUEST_SENSE;
- if (drive->media == ide_floppy) {
- pc->c[4] = 255;
- pc->req_xfer = 18;
- } else {
- pc->c[4] = 20;
- pc->req_xfer = 20;
- }
-}
-EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq)
-{
- struct request_sense *sense = &drive->sense_data;
- struct request *sense_rq;
- struct scsi_request *req;
- unsigned int cmd_len, sense_len;
- int err;
-
- switch (drive->media) {
- case ide_floppy:
- cmd_len = 255;
- sense_len = 18;
- break;
- case ide_tape:
- cmd_len = 20;
- sense_len = 20;
- break;
- default:
- cmd_len = 18;
- sense_len = 18;
- }
-
- BUG_ON(sense_len > sizeof(*sense));
-
- if (ata_sense_request(rq) || drive->sense_rq_armed)
- return;
-
- sense_rq = drive->sense_rq;
- if (!sense_rq) {
- sense_rq = blk_mq_alloc_request(drive->queue, REQ_OP_DRV_IN,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
- drive->sense_rq = sense_rq;
- }
- req = scsi_req(sense_rq);
-
- memset(sense, 0, sizeof(*sense));
-
- scsi_req_init(req);
-
- err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
- GFP_NOIO);
- if (unlikely(err)) {
- if (printk_ratelimit())
- printk(KERN_WARNING PFX "%s: failed to map sense "
- "buffer\n", drive->name);
- blk_mq_free_request(sense_rq);
- drive->sense_rq = NULL;
- return;
- }
-
- sense_rq->rq_disk = rq->rq_disk;
- sense_rq->cmd_flags = REQ_OP_DRV_IN;
- ide_req(sense_rq)->type = ATA_PRIV_SENSE;
-
- req->cmd[0] = GPCMD_REQUEST_SENSE;
- req->cmd[4] = cmd_len;
- if (drive->media == ide_tape)
- req->cmd[13] = REQ_IDETAPE_PC1;
-
- drive->sense_rq_armed = true;
-}
-EXPORT_SYMBOL_GPL(ide_prep_sense);
-
-int ide_queue_sense_rq(ide_drive_t *drive, void *special)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *sense_rq;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- /* deferred failure from ide_prep_sense() */
- if (!drive->sense_rq_armed) {
- printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
- drive->name);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return -ENOMEM;
- }
-
- sense_rq = drive->sense_rq;
- ide_req(sense_rq)->special = special;
- drive->sense_rq_armed = false;
-
- drive->hwif->rq = NULL;
-
- ide_insert_request_head(drive, sense_rq);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
-
-/*
- * Called when an error was detected during the last packet command.
- * We queue a request sense packet command at the head of the request
- * queue.
- */
-void ide_retry_pc(ide_drive_t *drive)
-{
- struct request *failed_rq = drive->hwif->rq;
- struct request *sense_rq = drive->sense_rq;
- struct ide_atapi_pc *pc = &drive->request_sense_pc;
-
- (void)ide_read_error(drive);
-
- /* init pc from sense_rq */
- ide_init_pc(pc);
- memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);
-
- if (drive->media == ide_tape)
- drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
-
- /*
- * Push back the failed request and put request sense on top
- * of it. The failed command will be retried after sense data
- * is acquired.
- */
- drive->hwif->rq = NULL;
- ide_requeue_and_plug(drive, failed_rq);
- if (ide_queue_sense_rq(drive, pc))
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
-}
-EXPORT_SYMBOL_GPL(ide_retry_pc);
-
-int ide_cd_expiry(ide_drive_t *drive)
-{
- struct request *rq = drive->hwif->rq;
- unsigned long wait = 0;
-
- debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);
-
- /*
- * Some commands are *slow* and normally take a long time to complete.
- * Usually we can use the ATAPI "disconnect" to bypass this, but not all
- * commands/drives support that. Let ide_timer_expiry keep polling us
- * for these.
- */
- switch (scsi_req(rq)->cmd[0]) {
- case GPCMD_BLANK:
- case GPCMD_FORMAT_UNIT:
- case GPCMD_RESERVE_RZONE_TRACK:
- case GPCMD_CLOSE_TRACK:
- case GPCMD_FLUSH_CACHE:
- wait = ATAPI_WAIT_PC;
- break;
- default:
- if (!(rq->rq_flags & RQF_QUIET))
- printk(KERN_INFO PFX "cmd 0x%x timed out\n",
- scsi_req(rq)->cmd[0]);
- wait = 0;
- break;
- }
- return wait;
-}
-EXPORT_SYMBOL_GPL(ide_cd_expiry);
-
-int ide_cd_get_xferlen(struct request *rq)
-{
- switch (req_op(rq)) {
- default:
- return 32768;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- return blk_rq_bytes(rq);
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- switch (ide_req(rq)->type) {
- case ATA_PRIV_PC:
- case ATA_PRIV_SENSE:
- return blk_rq_bytes(rq);
- default:
- return 0;
- }
- }
-}
-EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
-
-void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT |
- IDE_VALID_LBAM | IDE_VALID_LBAH);
-
- *bcount = (tf.lbah << 8) | tf.lbam;
- *ireason = tf.nsect & 3;
-}
-EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
-
-/*
- * Check the contents of the interrupt reason register and attempt to recover if
- * there are problems.
- *
- * Returns:
- * - 0 if everything's ok
- * - 1 if the request has to be terminated.
- */
-int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
- int ireason, int rw)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- debug_log("ireason: 0x%x, rw: 0x%x\n", ireason, rw);
-
- if (ireason == (!rw << 1))
- return 0;
- else if (ireason == (rw << 1)) {
- printk(KERN_ERR PFX "%s: %s: wrong transfer direction!\n",
- drive->name, __func__);
-
- if (dev_is_idecd(drive))
- ide_pad_transfer(drive, rw, len);
- } else if (!rw && ireason == ATAPI_COD) {
- if (dev_is_idecd(drive)) {
- /*
- * Some drives (ASUS) seem to tell us that status info
- * is available. Just get it and ignore.
- */
- (void)hwif->tp_ops->read_status(hwif);
- return 0;
- }
- } else {
- if (ireason & ATAPI_COD)
- printk(KERN_ERR PFX "%s: CoD != 0 in %s\n", drive->name,
- __func__);
-
- /* drive wants a command packet, or invalid ireason... */
- printk(KERN_ERR PFX "%s: %s: bad interrupt reason 0x%02x\n",
- drive->name, __func__, ireason);
- }
-
- if (dev_is_idecd(drive) && ata_pc_request(rq))
- rq->rq_flags |= RQF_FAILED;
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(ide_check_ireason);
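An editorial summary of the interrupt-reason values the check above expects (CoD is bit 0 and IO is bit 1 of the reason register; illustrative only, not from the original source):

/*
 * Editorial example of the values checked by ide_check_ireason():
 *   data-in  phase (rw == 0): IO=1, CoD=0 -> ireason = 2 = (!rw << 1)
 *   data-out phase (rw == 1): IO=0, CoD=0 -> ireason = 0 = (!rw << 1)
 *   command packet phase:     IO=0, CoD=1 -> ireason = 1
 */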
-
-/*
- * This is the usual interrupt handler which will be called during a packet
- * command. We will transfer some of the data (as requested by the drive)
- * and will re-point the interrupt handler to us.
- */
-static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
-{
- struct ide_atapi_pc *pc = drive->pc;
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq = hwif->rq;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- unsigned int timeout, done;
- u16 bcount;
- u8 stat, ireason, dsc = 0;
- u8 write = !!(pc->flags & PC_FLAG_WRITING);
-
- debug_log("Enter %s - interrupt handler\n", __func__);
-
- timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
- : WAIT_TAPE_CMD;
-
- /* Clear the interrupt */
- stat = tp_ops->read_status(hwif);
-
- if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- int rc;
-
- drive->waiting_for_dma = 0;
- rc = hwif->dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
-
- if (rc || (drive->media == ide_tape && (stat & ATA_ERR))) {
- if (drive->media == ide_floppy)
- printk(KERN_ERR PFX "%s: DMA %s error\n",
- drive->name, rq_data_dir(pc->rq)
- ? "write" : "read");
- pc->flags |= PC_FLAG_DMA_ERROR;
- } else
- scsi_req(rq)->resid_len = 0;
- debug_log("%s: DMA finished\n", drive->name);
- }
-
- /* No more interrupts */
- if ((stat & ATA_DRQ) == 0) {
- int uptodate;
- blk_status_t error;
-
- debug_log("Packet command completed, %d bytes transferred\n",
- blk_rq_bytes(rq));
-
- pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
-
- local_irq_enable_in_hardirq();
-
- if (drive->media == ide_tape &&
- (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
- stat &= ~ATA_ERR;
-
- if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
- /* Error detected */
- debug_log("%s: I/O error\n", drive->name);
-
- if (drive->media != ide_tape)
- scsi_req(pc->rq)->result++;
-
- if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
- printk(KERN_ERR PFX "%s: I/O error in request "
- "sense command\n", drive->name);
- return ide_do_reset(drive);
- }
-
- debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);
-
- /* Retry operation */
- ide_retry_pc(drive);
-
- /* queued, but not started */
- return ide_stopped;
- }
- pc->error = 0;
-
- if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
- dsc = 1;
-
- /*
- * ->pc_callback() might change rq->data_len for
- * residual count, cache total length.
- */
- done = blk_rq_bytes(rq);
-
- /* Command finished - Call the callback function */
- uptodate = drive->pc_callback(drive, dsc);
-
- if (uptodate == 0)
- drive->failed_pc = NULL;
-
- if (ata_misc_request(rq)) {
- scsi_req(rq)->result = 0;
- error = BLK_STS_OK;
- } else {
-
- if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
- if (scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- }
-
- error = uptodate ? BLK_STS_OK : BLK_STS_IOERR;
- }
-
- ide_complete_rq(drive, error, blk_rq_bytes(rq));
- return ide_stopped;
- }
-
- if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
- printk(KERN_ERR PFX "%s: The device wants to issue more "
- "interrupts in DMA mode\n", drive->name);
- ide_dma_off(drive);
- return ide_do_reset(drive);
- }
-
- /* Get the number of bytes to transfer on this interrupt. */
- ide_read_bcount_and_ireason(drive, &bcount, &ireason);
-
- if (ide_check_ireason(drive, rq, bcount, ireason, write))
- return ide_do_reset(drive);
-
- done = min_t(unsigned int, bcount, cmd->nleft);
- ide_pio_bytes(drive, cmd, write, done);
-
- /* Update transferred byte count */
- scsi_req(rq)->resid_len -= done;
-
- bcount -= done;
-
- if (bcount)
- ide_pad_transfer(drive, write, bcount);
-
- debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
- scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);
-
- /* And set the interrupt handler again */
- ide_set_handler(drive, ide_pc_intr, timeout);
- return ide_started;
-}
-
-static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf,
- u16 bcount, u8 dma)
-{
- cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO;
- cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM |
- IDE_VALID_FEATURE | valid_tf;
- cmd->tf.command = ATA_CMD_PACKET;
- cmd->tf.feature = dma; /* Use PIO/DMA */
- cmd->tf.lbam = bcount & 0xff;
- cmd->tf.lbah = (bcount >> 8) & 0xff;
-}
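An editorial note on the byte-count split done above (illustrative only): the ATAPI per-DRQ byte-count limit goes out with the low byte in lbam and the high byte in lbah, so the 63 * 1024 cap used later in ide_issue_pc() encodes as shown below.

/*
 * Editorial example of the split in ide_init_packet_cmd():
 *   bcount  = 63 * 1024            = 0xfc00
 *   tf.lbam = 0xfc00 & 0xff        = 0x00
 *   tf.lbah = (0xfc00 >> 8) & 0xff = 0xfc
 */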
-
-static u8 ide_read_ireason(ide_drive_t *drive)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT);
-
- return tf.nsect & 3;
-}
-
-static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
-{
- int retries = 100;
-
- while (retries-- && ((ireason & ATAPI_COD) == 0 ||
- (ireason & ATAPI_IO))) {
- printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing "
- "a packet command, retrying\n", drive->name);
- udelay(100);
- ireason = ide_read_ireason(drive);
- if (retries == 0) {
- printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing"
- " a packet command, ignoring\n",
- drive->name);
- ireason |= ATAPI_COD;
- ireason &= ~ATAPI_IO;
- }
- }
-
- return ireason;
-}
-
-static int ide_delayed_transfer_pc(ide_drive_t *drive)
-{
- /* Send the actual packet */
- drive->hwif->tp_ops->output_data(drive, NULL, drive->pc->c, 12);
-
- /* Timeout for the packet command */
- return WAIT_FLOPPY_CMD;
-}
-
-static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
-{
- struct ide_atapi_pc *pc;
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- ide_expiry_t *expiry;
- unsigned int timeout;
- int cmd_len;
- ide_startstop_t startstop;
- u8 ireason;
-
- if (ide_wait_stat(&startstop, drive, ATA_DRQ, ATA_BUSY, WAIT_READY)) {
- printk(KERN_ERR PFX "%s: Strange, packet command initiated yet "
- "DRQ isn't asserted\n", drive->name);
- return startstop;
- }
-
- if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
- if (drive->dma)
- drive->waiting_for_dma = 1;
- }
-
- if (dev_is_idecd(drive)) {
- /* ATAPI commands get padded out to 12 bytes minimum */
- cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
- if (cmd_len < ATAPI_MIN_CDB_BYTES)
- cmd_len = ATAPI_MIN_CDB_BYTES;
-
- timeout = rq->timeout;
- expiry = ide_cd_expiry;
- } else {
- pc = drive->pc;
-
- cmd_len = ATAPI_MIN_CDB_BYTES;
-
- /*
- * If necessary schedule the packet transfer to occur 'timeout'
- * milliseconds later in ide_delayed_transfer_pc() after the
- * device says it's ready for a packet.
- */
- if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
- timeout = drive->pc_delay;
- expiry = &ide_delayed_transfer_pc;
- } else {
- timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
- : WAIT_TAPE_CMD;
- expiry = NULL;
- }
-
- ireason = ide_read_ireason(drive);
- if (drive->media == ide_tape)
- ireason = ide_wait_ireason(drive, ireason);
-
- if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) {
- printk(KERN_ERR PFX "%s: (IO,CoD) != (0,1) while "
- "issuing a packet command\n", drive->name);
-
- return ide_do_reset(drive);
- }
- }
-
- hwif->expiry = expiry;
-
- /* Set the interrupt routine */
- ide_set_handler(drive,
- (dev_is_idecd(drive) ? drive->irq_handler
- : ide_pc_intr),
- timeout);
-
- /* Send the actual packet */
- if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
- hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
-
- /* Begin DMA, if necessary */
- if (dev_is_idecd(drive)) {
- if (drive->dma)
- hwif->dma_ops->dma_start(drive);
- } else {
- if (pc->flags & PC_FLAG_DMA_OK) {
- pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
- hwif->dma_ops->dma_start(drive);
- }
- }
-
- return ide_started;
-}
-
-ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- struct ide_atapi_pc *pc;
- ide_hwif_t *hwif = drive->hwif;
- ide_expiry_t *expiry = NULL;
- struct request *rq = hwif->rq;
- unsigned int timeout, bytes;
- u16 bcount;
- u8 valid_tf;
- u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT);
-
- if (dev_is_idecd(drive)) {
- valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL;
- bcount = ide_cd_get_xferlen(rq);
- expiry = ide_cd_expiry;
- timeout = ATAPI_WAIT_PC;
-
- if (drive->dma)
- drive->dma = !ide_dma_prepare(drive, cmd);
- } else {
- pc = drive->pc;
-
- valid_tf = IDE_VALID_DEVICE;
- bytes = blk_rq_bytes(rq);
- bcount = ((drive->media == ide_tape) ? bytes
- : min_t(unsigned int,
- bytes, 63 * 1024));
-
- /* We haven't transferred any data yet */
- scsi_req(rq)->resid_len = bcount;
-
- if (pc->flags & PC_FLAG_DMA_ERROR) {
- pc->flags &= ~PC_FLAG_DMA_ERROR;
- ide_dma_off(drive);
- }
-
- if (pc->flags & PC_FLAG_DMA_OK)
- drive->dma = !ide_dma_prepare(drive, cmd);
-
- if (!drive->dma)
- pc->flags &= ~PC_FLAG_DMA_OK;
-
- timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
- : WAIT_TAPE_CMD;
- }
-
- ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma);
-
- (void)do_rw_taskfile(drive, cmd);
-
- if (drq_int) {
- if (drive->dma)
- drive->waiting_for_dma = 0;
- hwif->expiry = expiry;
- }
-
- ide_execute_command(drive, cmd, ide_transfer_pc, timeout);
-
- return drq_int ? ide_started : ide_transfer_pc(drive);
-}
-EXPORT_SYMBOL_GPL(ide_issue_pc);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
deleted file mode 100644
index cffbcc27a34c..000000000000
--- a/drivers/ide/ide-cd.c
+++ /dev/null
@@ -1,1858 +0,0 @@
-/*
- * ATAPI CD-ROM driver.
- *
- * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
- * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
- * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2005, 2007-2009 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * See Documentation/cdrom/ide-cd.rst for usage information.
- *
- * Suggestions are welcome. Patches that work are more welcome though. ;-)
- *
- * Documentation:
- * Mt. Fuji (SFF8090 version 4) and ATAPI (SFF-8020i rev 2.6) standards.
- *
- * For historical changelog please see:
- * Documentation/ide/ChangeLog.ide-cd.1994-2004
- */
-
-#define DRV_NAME "ide-cd"
-#define PFX DRV_NAME ": "
-
-#define IDECD_VERSION "5.00"
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched/task_stack.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/cdrom.h>
-#include <linux/ide.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/bcd.h>
-
-/* For SCSI -> ATAPI command conversion */
-#include <scsi/scsi.h>
-
-#include <linux/io.h>
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-
-#include "ide-cd.h"
-
-static DEFINE_MUTEX(ide_cd_mutex);
-static DEFINE_MUTEX(idecd_ref_mutex);
-
-static void ide_cd_release(struct device *);
-
-static struct cdrom_info *ide_cd_get(struct gendisk *disk)
-{
- struct cdrom_info *cd = NULL;
-
- mutex_lock(&idecd_ref_mutex);
- cd = ide_drv_g(disk, cdrom_info);
- if (cd) {
- if (ide_device_get(cd->drive))
- cd = NULL;
- else
- get_device(&cd->dev);
-
- }
- mutex_unlock(&idecd_ref_mutex);
- return cd;
-}
-
-static void ide_cd_put(struct cdrom_info *cd)
-{
- ide_drive_t *drive = cd->drive;
-
- mutex_lock(&idecd_ref_mutex);
- put_device(&cd->dev);
- ide_device_put(drive);
- mutex_unlock(&idecd_ref_mutex);
-}
-
-/*
- * Generic packet command support and error handling routines.
- */
-
-/* Mark that we've seen a media change and invalidate our internal buffers. */
-static void cdrom_saw_media_change(ide_drive_t *drive)
-{
- drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
- drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
-}
-
-static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
-{
- struct request_sense *sense = &drive->sense_data;
- int log = 0;
-
- if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
- return 0;
-
- ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
-
- switch (sense->sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- break;
- case NOT_READY:
- /*
-		 * don't care about tray-state messages (e.g. for capacity
-		 * commands) or "in the process of becoming ready"
- */
- if (sense->asc == 0x3a || sense->asc == 0x04)
- break;
- log = 1;
- break;
- case ILLEGAL_REQUEST:
- /*
- * don't log START_STOP unit with LoEj set, since we cannot
- * reliably check if drive can auto-close
- */
- if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
- break;
- log = 1;
- break;
- case UNIT_ATTENTION:
- /*
- * Make good and sure we've seen this potential media change.
-		 * Some drives (e.g. Creative) fail to present the correct sense
- * key in the error register.
- */
- cdrom_saw_media_change(drive);
- break;
- default:
- log = 1;
- break;
- }
- return log;
-}
-
-static void cdrom_analyze_sense_data(ide_drive_t *drive,
- struct request *failed_command)
-{
- struct request_sense *sense = &drive->sense_data;
- struct cdrom_info *info = drive->driver_data;
- unsigned long sector;
- unsigned long bio_sectors;
-
- ide_debug_log(IDE_DBG_SENSE, "error_code: 0x%x, sense_key: 0x%x",
- sense->error_code, sense->sense_key);
-
- if (failed_command)
- ide_debug_log(IDE_DBG_SENSE, "failed cmd: 0x%x",
- failed_command->cmd[0]);
-
- if (!cdrom_log_sense(drive, failed_command))
- return;
-
- /*
- * If a read toc is executed for a CD-R or CD-RW medium where the first
- * toc has not been recorded yet, it will fail with 05/24/00 (which is a
- * confusing error)
- */
- if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
- if (sense->sense_key == 0x05 && sense->asc == 0x24)
- return;
-
- /* current error */
- if (sense->error_code == 0x70) {
- switch (sense->sense_key) {
- case MEDIUM_ERROR:
- case VOLUME_OVERFLOW:
- case ILLEGAL_REQUEST:
- if (!sense->valid)
- break;
- if (failed_command == NULL ||
- blk_rq_is_passthrough(failed_command))
- break;
- sector = (sense->information[0] << 24) |
- (sense->information[1] << 16) |
- (sense->information[2] << 8) |
- (sense->information[3]);
-
- if (queue_logical_block_size(drive->queue) == 2048)
- /* device sector size is 2K */
- sector <<= 2;
-
- bio_sectors = max(bio_sectors(failed_command->bio), 4U);
- sector &= ~(bio_sectors - 1);
-
- /*
- * The SCSI specification allows for the value
- * returned by READ CAPACITY to be up to 75 2K
- * sectors past the last readable block.
- * Therefore, if we hit a medium error within the
- * last 75 2K sectors, we decrease the saved size
- * value.
- */
- if (sector < get_capacity(info->disk) &&
- drive->probed_capacity - sector < 4 * 75)
- set_capacity(info->disk, sector);
- }
- }
-
- ide_cd_log_error(drive->name, failed_command, sense);
-}
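
The sector recovery in cdrom_analyze_sense_data() reads the 4-byte information field of fixed-format sense big-endian, scales 2 KB device sectors to 512-byte units, and rounds down to the start of the failed bio. The same arithmetic as a self-contained sketch, with made-up sample values:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the 4-byte big-endian "information" field of fixed-format sense. */
    static unsigned long sense_info_to_sector(const uint8_t info[4])
    {
        return ((unsigned long)info[0] << 24) | ((unsigned long)info[1] << 16) |
               ((unsigned long)info[2] << 8)  |  (unsigned long)info[3];
    }

    int main(void)
    {
        const uint8_t info[4] = { 0x00, 0x00, 0x12, 0x34 };  /* sample value */
        unsigned long bio_sectors = 4;   /* at least one 2 KB frame = 4 x 512 B */
        unsigned long sector = sense_info_to_sector(info);

        sector <<= 2;                    /* 2 KB device sectors -> 512 B units */
        sector &= ~(bio_sectors - 1);    /* round down to the start of the bio */

        printf("failed 512-byte sector: %lu\n", sector);
        return 0;
    }
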
-
-static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
-{
- /*
- * For ATA_PRIV_SENSE, "ide_req(rq)->special" points to the original
- * failed request. Also, the sense data should be read
- * directly from rq which might be different from the original
- * sense buffer if it got copied during mapping.
- */
- struct request *failed = ide_req(rq)->special;
- void *sense = bio_data(rq->bio);
-
- if (failed) {
- /*
- * Sense is always read into drive->sense_data, copy back to the
- * original request.
- */
- memcpy(scsi_req(failed)->sense, sense, 18);
- scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
- cdrom_analyze_sense_data(drive, failed);
-
- if (ide_end_rq(drive, failed, BLK_STS_IOERR, blk_rq_bytes(failed)))
- BUG();
- } else
- cdrom_analyze_sense_data(drive, NULL);
-}
-
-
-/*
- * Allow the drive ATAPI_WAIT_WRITE_BUSY (10 seconds) to recover; some devices
- * will return NOT_READY while flushing data from cache.
- *
- * returns: 0 failed (write timeout expired)
- * 1 success
- */
-static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
-{
-
- struct cdrom_info *info = drive->driver_data;
-
- if (!scsi_req(rq)->result)
- info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY;
-
- scsi_req(rq)->result = 1;
-
- if (time_after(jiffies, info->write_timeout))
- return 0;
- else {
- /*
- * take a breather
- */
- blk_mq_requeue_request(rq, false);
- blk_mq_delay_kick_requeue_list(drive->queue, 1);
- return 1;
- }
-}
-
-static void ide_cd_free_sense(ide_drive_t *drive)
-{
- if (!drive->sense_rq)
- return;
-
- blk_mq_free_request(drive->sense_rq);
- drive->sense_rq = NULL;
- drive->sense_rq_armed = false;
-}
-
-/**
- * Returns:
- * 0: if the request should be continued.
- * 1: if the request will be going through error recovery.
- * 2: if the request should be ended.
- */
-static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- int err, sense_key, do_end_request = 0;
-
- /* get the IDE error register */
- err = ide_read_error(drive);
- sense_key = err >> 4;
-
- ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, "
- "stat 0x%x",
- rq->cmd[0], rq->cmd_type, err, stat);
-
- if (ata_sense_request(rq)) {
- /*
- * We got an error trying to get sense info from the drive
- * (probably while trying to recover from a former error).
- * Just give up.
- */
- rq->rq_flags |= RQF_FAILED;
- return 2;
- }
-
- /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
- if (blk_rq_is_scsi(rq) && !scsi_req(rq)->result)
- scsi_req(rq)->result = SAM_STAT_CHECK_CONDITION;
-
- if (blk_noretry_request(rq))
- do_end_request = 1;
-
- switch (sense_key) {
- case NOT_READY:
- if (req_op(rq) == REQ_OP_WRITE) {
- if (ide_cd_breathe(drive, rq))
- return 1;
- } else {
- cdrom_saw_media_change(drive);
-
- if (!blk_rq_is_passthrough(rq) &&
- !(rq->rq_flags & RQF_QUIET))
- printk(KERN_ERR PFX "%s: tray open\n",
- drive->name);
- }
- do_end_request = 1;
- break;
- case UNIT_ATTENTION:
- cdrom_saw_media_change(drive);
-
- if (blk_rq_is_passthrough(rq))
- return 0;
-
- /*
- * Arrange to retry the request but be sure to give up if we've
- * retried too many times.
- */
- if (++scsi_req(rq)->result > ERROR_MAX)
- do_end_request = 1;
- break;
- case ILLEGAL_REQUEST:
- /*
- * Don't print error message for this condition -- SFF8090i
- * indicates that 5/24/00 is the correct response to a request
- * to close the tray if the drive doesn't have that capability.
- *
- * cdrom_log_sense() knows this!
- */
- if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
- break;
- fallthrough;
- case DATA_PROTECT:
- /*
- * No point in retrying after an illegal request or data
- * protect error.
- */
- if (!(rq->rq_flags & RQF_QUIET))
- ide_dump_status(drive, "command error", stat);
- do_end_request = 1;
- break;
- case MEDIUM_ERROR:
- /*
- * No point in re-trying a zillion times on a bad sector.
- * If we got here the error is not correctable.
- */
- if (!(rq->rq_flags & RQF_QUIET))
- ide_dump_status(drive, "media error "
- "(bad sector)", stat);
- do_end_request = 1;
- break;
- case BLANK_CHECK:
- /* disk appears blank? */
- if (!(rq->rq_flags & RQF_QUIET))
- ide_dump_status(drive, "media error (blank)",
- stat);
- do_end_request = 1;
- break;
- default:
- if (blk_rq_is_passthrough(rq))
- break;
- if (err & ~ATA_ABORTED) {
- /* go to the default handler for other errors */
- ide_error(drive, "cdrom_decode_status", stat);
- return 1;
- } else if (++scsi_req(rq)->result > ERROR_MAX)
- /* we've racked up too many retries, abort */
- do_end_request = 1;
- }
-
- if (blk_rq_is_passthrough(rq)) {
- rq->rq_flags |= RQF_FAILED;
- do_end_request = 1;
- }
-
- /*
- * End a request through request sense analysis when we have sense data.
- * We need this in order to perform end of media processing.
- */
- if (do_end_request)
- goto end_request;
-
- /* if we got a CHECK_CONDITION status, queue a request sense command */
- if (stat & ATA_ERR)
- return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
- return 1;
-
-end_request:
- if (stat & ATA_ERR) {
- hwif->rq = NULL;
- return ide_queue_sense_rq(drive, rq) ? 2 : 1;
- } else
- return 2;
-}
-
-static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- struct request *rq = cmd->rq;
-
- ide_debug_log(IDE_DBG_FUNC, "rq->cmd[0]: 0x%x", rq->cmd[0]);
-
- /*
- * Some of the trailing request sense fields are optional,
- * and some drives don't send them. Sigh.
- */
- if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
- cmd->nleft > 0 && cmd->nleft <= 5)
- cmd->nleft = 0;
-}
-
-int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
- int write, void *buffer, unsigned *bufflen,
- struct scsi_sense_hdr *sshdr, int timeout,
- req_flags_t rq_flags)
-{
- struct cdrom_info *info = drive->driver_data;
- struct scsi_sense_hdr local_sshdr;
- int retries = 10;
- bool failed;
-
- ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
- "rq_flags: 0x%x",
- cmd[0], write, timeout, rq_flags);
-
- if (!sshdr)
- sshdr = &local_sshdr;
-
- /* start of retry loop */
- do {
- struct request *rq;
- int error;
- bool delay = false;
-
- rq = blk_get_request(drive->queue,
- write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
- ide_req(rq)->type = ATA_PRIV_PC;
- rq->rq_flags |= rq_flags;
- rq->timeout = timeout;
- if (buffer) {
- error = blk_rq_map_kern(drive->queue, rq, buffer,
- *bufflen, GFP_NOIO);
- if (error) {
- blk_put_request(rq);
- return error;
- }
- }
-
- blk_execute_rq(info->disk, rq, 0);
- error = scsi_req(rq)->result ? -EIO : 0;
-
- if (buffer)
- *bufflen = scsi_req(rq)->resid_len;
- scsi_normalize_sense(scsi_req(rq)->sense,
- scsi_req(rq)->sense_len, sshdr);
-
- /*
- * FIXME: we should probably abort/retry or something in case of
- * failure.
- */
- failed = (rq->rq_flags & RQF_FAILED) != 0;
- if (failed) {
- /*
- * The request failed. Retry if it was due to a unit
- * attention status (usually means media was changed).
- */
- if (sshdr->sense_key == UNIT_ATTENTION)
- cdrom_saw_media_change(drive);
- else if (sshdr->sense_key == NOT_READY &&
- sshdr->asc == 4 && sshdr->ascq != 4) {
- /*
- * The drive is in the process of loading
- * a disk. Retry, but wait a little to give
- * the drive time to complete the load.
- */
- delay = true;
- } else {
- /* otherwise, don't retry */
- retries = 0;
- }
- --retries;
- }
- blk_put_request(rq);
- if (delay)
- ssleep(2);
- } while (failed && retries >= 0);
-
- /* return an error if the command failed */
- return failed ? -EIO : 0;
-}
-
-/*
- * returns true if rq has been completed
- */
-static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- unsigned int nr_bytes = cmd->nbytes - cmd->nleft;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- nr_bytes -= cmd->last_xfer_len;
-
- if (nr_bytes > 0) {
- ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
- return true;
- }
-
- return false;
-}
-
-/* standard prep_rq that builds 10 byte cmds */
-static bool ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
-{
- int hard_sect = queue_logical_block_size(q);
- long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
- unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
- struct scsi_request *req = scsi_req(rq);
-
- if (rq_data_dir(rq) == READ)
- req->cmd[0] = GPCMD_READ_10;
- else
- req->cmd[0] = GPCMD_WRITE_10;
-
- /*
- * fill in lba
- */
- req->cmd[2] = (block >> 24) & 0xff;
- req->cmd[3] = (block >> 16) & 0xff;
- req->cmd[4] = (block >> 8) & 0xff;
- req->cmd[5] = block & 0xff;
-
- /*
- * and transfer length
- */
- req->cmd[7] = (blocks >> 8) & 0xff;
- req->cmd[8] = blocks & 0xff;
- req->cmd_len = 10;
- return true;
-}
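
ide_cdrom_prep_fs() converts a request expressed in 512-byte sectors into device blocks and packs the LBA and transfer length big-endian into a 10-byte CDB. A standalone sketch of that packing; the builder function and its inputs are hypothetical, only the byte layout mirrors the code above:

    #include <stdint.h>
    #include <stdio.h>

    #define GPCMD_READ_10 0x28

    /* Build a READ(10) CDB for a request given in 512-byte sectors. */
    static void build_read10(uint8_t cdb[10], uint64_t pos_512,
                             unsigned int nsect_512, unsigned int block_size)
    {
        unsigned int sect_per_block = block_size >> 9;
        uint32_t lba = (uint32_t)(pos_512 / sect_per_block);
        uint16_t blocks = (uint16_t)(nsect_512 / sect_per_block);
        int i;

        for (i = 0; i < 10; i++)
            cdb[i] = 0;
        cdb[0] = GPCMD_READ_10;
        cdb[2] = (lba >> 24) & 0xff;    /* LBA, big-endian */
        cdb[3] = (lba >> 16) & 0xff;
        cdb[4] = (lba >> 8) & 0xff;
        cdb[5] = lba & 0xff;
        cdb[7] = (blocks >> 8) & 0xff;  /* transfer length in device blocks */
        cdb[8] = blocks & 0xff;
    }

    int main(void)
    {
        uint8_t cdb[10];
        int i;

        /* 8 frames of a 2048-byte-per-frame disc, starting at frame 16. */
        build_read10(cdb, 16 * 4, 8 * 4, 2048);
        for (i = 0; i < 10; i++)
            printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
    }
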
-
-/*
- * Most of the SCSI commands are supported directly by ATAPI devices.
- * This transform handles the few exceptions.
- */
-static bool ide_cdrom_prep_pc(struct request *rq)
-{
- u8 *c = scsi_req(rq)->cmd;
-
- /* transform 6-byte read/write commands to the 10-byte version */
- if (c[0] == READ_6 || c[0] == WRITE_6) {
- c[8] = c[4];
- c[5] = c[3];
- c[4] = c[2];
- c[3] = c[1] & 0x1f;
- c[2] = 0;
- c[1] &= 0xe0;
- c[0] += (READ_10 - READ_6);
- scsi_req(rq)->cmd_len = 10;
- return true;
- }
-
- /*
-	 * It's silly to pretend we understand the 6-byte MODE SENSE/SELECT
-	 * commands; just reject them with ILLEGAL_REQUEST and let the caller
-	 * take the appropriate action.
- */
- if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
- scsi_req(rq)->result = ILLEGAL_REQUEST;
- return false;
- }
-
- return true;
-}
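
The READ(6)/WRITE(6) expansion above relocates the 21-bit LBA and 8-bit length into their 10-byte command positions. The same byte shuffle applied to a sample READ(6) CDB as a standalone program (opcode values are written out because no SCSI headers are used here):

    #include <stdint.h>
    #include <stdio.h>

    #define READ_6  0x08
    #define READ_10 0x28

    /* Expand a 6-byte READ/WRITE CDB in place into its 10-byte form. */
    static void cdb6_to_cdb10(uint8_t c[10])
    {
        c[8] = c[4];            /* transfer length */
        c[5] = c[3];            /* LBA low byte */
        c[4] = c[2];
        c[3] = c[1] & 0x1f;     /* top 5 LBA bits live in byte 1 */
        c[2] = 0;
        c[1] &= 0xe0;           /* keep only the LUN bits */
        c[0] += READ_10 - READ_6;
    }

    int main(void)
    {
        /* READ(6): LBA 0x12345, 16 blocks. */
        uint8_t c[10] = { READ_6, 0x01, 0x23, 0x45, 16, 0 };
        int i;

        cdb6_to_cdb10(c);
        for (i = 0; i < 10; i++)
            printf("%02x ", c[i]);
        printf("\n");           /* 28 00 00 01 23 45 00 00 10 00 */
        return 0;
    }
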
-
-static bool ide_cdrom_prep_rq(ide_drive_t *drive, struct request *rq)
-{
- if (!blk_rq_is_passthrough(rq)) {
- scsi_req_init(scsi_req(rq));
-
- return ide_cdrom_prep_fs(drive->queue, rq);
- } else if (blk_rq_is_scsi(rq))
- return ide_cdrom_prep_pc(rq);
-
- return true;
-}
-
-static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq = hwif->rq;
- ide_expiry_t *expiry = NULL;
- int dma_error = 0, dma, thislen, uptodate = 0;
- int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
- int sense = ata_sense_request(rq);
- unsigned int timeout;
- u16 len;
- u8 ireason, stat;
-
- ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write);
-
- /* check for errors */
- dma = drive->dma;
- if (dma) {
- drive->dma = 0;
- drive->waiting_for_dma = 0;
- dma_error = hwif->dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- if (dma_error) {
- printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name,
- write ? "write" : "read");
- ide_dma_off(drive);
- }
- }
-
- /* check status */
- stat = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(stat, 0, BAD_R_STAT)) {
- rc = cdrom_decode_status(drive, stat);
- if (rc) {
- if (rc == 2)
- goto out_end;
- return ide_stopped;
- }
- }
-
- /* using dma, transfer is complete now */
- if (dma) {
- if (dma_error)
- return ide_error(drive, "dma error", stat);
- uptodate = 1;
- goto out_end;
- }
-
- ide_read_bcount_and_ireason(drive, &len, &ireason);
-
- thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
- if (thislen > len)
- thislen = len;
-
- ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d",
- stat, thislen);
-
- /* If DRQ is clear, the command has completed. */
- if ((stat & ATA_DRQ) == 0) {
- switch (req_op(rq)) {
- default:
- /*
- * If we're not done reading/writing, complain.
- * Otherwise, complete the command normally.
- */
- uptodate = 1;
- if (cmd->nleft > 0) {
- printk(KERN_ERR PFX "%s: %s: data underrun "
- "(%u bytes)\n", drive->name, __func__,
- cmd->nleft);
- if (!write)
- rq->rq_flags |= RQF_FAILED;
- uptodate = 0;
- }
- goto out_end;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- ide_cd_request_sense_fixup(drive, cmd);
-
- uptodate = cmd->nleft ? 0 : 1;
-
- /*
- * suck out the remaining bytes from the drive in an
- * attempt to complete the data xfer. (see BZ#13399)
- */
- if (!(stat & ATA_ERR) && !uptodate && thislen) {
- ide_pio_bytes(drive, cmd, write, thislen);
- uptodate = cmd->nleft ? 0 : 1;
- }
-
- if (!uptodate)
- rq->rq_flags |= RQF_FAILED;
- goto out_end;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- goto out_end;
- }
- }
-
- rc = ide_check_ireason(drive, rq, len, ireason, write);
- if (rc)
- goto out_end;
-
- cmd->last_xfer_len = 0;
-
- ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, "
- "ireason: 0x%x",
- rq->cmd_type, ireason);
-
- /* transfer data */
- while (thislen > 0) {
- int blen = min_t(int, thislen, cmd->nleft);
-
- if (cmd->nleft == 0)
- break;
-
- ide_pio_bytes(drive, cmd, write, blen);
- cmd->last_xfer_len += blen;
-
- thislen -= blen;
- len -= blen;
-
- if (sense && write == 0)
- scsi_req(rq)->sense_len += blen;
- }
-
- /* pad, if necessary */
- if (len > 0) {
- if (blk_rq_is_passthrough(rq) || write == 0)
- ide_pad_transfer(drive, write, len);
- else {
- printk(KERN_ERR PFX "%s: confused, missing data\n",
- drive->name);
- blk_dump_rq_flags(rq, "cdrom_newpc_intr");
- }
- }
-
- switch (req_op(rq)) {
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- timeout = rq->timeout;
- break;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- expiry = ide_cd_expiry;
- fallthrough;
- default:
- timeout = ATAPI_WAIT_PC;
- break;
- }
-
- hwif->expiry = expiry;
- ide_set_handler(drive, cdrom_newpc_intr, timeout);
- return ide_started;
-
-out_end:
- if (blk_rq_is_scsi(rq) && rc == 0) {
- scsi_req(rq)->resid_len = 0;
- blk_mq_end_request(rq, BLK_STS_OK);
- hwif->rq = NULL;
- } else {
- if (sense && uptodate)
- ide_cd_complete_failed_rq(drive, rq);
-
- if (!blk_rq_is_passthrough(rq)) {
- if (cmd->nleft == 0)
- uptodate = 1;
- } else {
- if (uptodate <= 0 && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- }
-
- if (uptodate == 0 && rq->bio)
- if (ide_cd_error_cmd(drive, cmd))
- return ide_stopped;
-
- /* make sure it's fully ended */
- if (blk_rq_is_passthrough(rq)) {
- scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
- if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
- scsi_req(rq)->resid_len += cmd->last_xfer_len;
- }
-
- ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq));
-
- if (sense && rc == 2)
- ide_error(drive, "request sense failure", stat);
- }
-
- ide_cd_free_sense(drive);
- return ide_stopped;
-}
-
-static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct request_queue *q = drive->queue;
- int write = rq_data_dir(rq) == WRITE;
- unsigned short sectors_per_frame =
- queue_logical_block_size(q) >> SECTOR_SHIFT;
-
- ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
- "secs_per_frame: %u",
- rq->cmd[0], rq->cmd_flags, sectors_per_frame);
-
- if (write) {
- /* disk has become write protected */
- if (get_disk_ro(cd->disk))
- return ide_stopped;
- } else {
- /*
- * We may be retrying this request after an error. Fix up any
- * weirdness which might be present in the request packet.
- */
- ide_cdrom_prep_rq(drive, rq);
- }
-
- /* fs requests *must* be hardware frame aligned */
- if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
- (blk_rq_pos(rq) & (sectors_per_frame - 1)))
- return ide_stopped;
-
- /* use DMA, if possible */
- drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- if (write)
- cd->devinfo.media_written = 1;
-
- rq->timeout = ATAPI_WAIT_PC;
-
- return ide_started;
-}
-
-static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
-{
-
- ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
- rq->cmd[0], rq->cmd_type);
-
- if (blk_rq_is_scsi(rq))
- rq->rq_flags |= RQF_QUIET;
- else
- rq->rq_flags &= ~RQF_FAILED;
-
- drive->dma = 0;
-
- /* sg request */
- if (rq->bio) {
- struct request_queue *q = drive->queue;
- char *buf = bio_data(rq->bio);
- unsigned int alignment;
-
- drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- /*
- * check if dma is safe
- *
- * NOTE! The "len" and "addr" checks should possibly have
- * separate masks.
- */
- alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- if ((unsigned long)buf & alignment
- || blk_rq_bytes(rq) & q->dma_pad_mask
- || object_is_on_stack(buf))
- drive->dma = 0;
- }
-}
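
cdrom_do_block_pc() falls back to PIO whenever the payload address or length violates the queue's DMA alignment/pad masks or the buffer sits on the kernel stack. A userspace sketch of the mask test, reusing the 31-byte alignment and 15-byte pad mask that ide_cdrom_setup() sets further down:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror the queue limits set by ide_cdrom_setup(): 31-byte DMA alignment
     * and a 15-byte DMA pad mask. */
    #define DMA_ALIGNMENT 31u
    #define DMA_PAD_MASK  15u

    /* DMA is only safe if both the address and the length honour the masks. */
    static bool dma_safe(const void *buf, unsigned int len)
    {
        unsigned int alignment = DMA_ALIGNMENT | DMA_PAD_MASK;

        if ((uintptr_t)buf & alignment)
            return false;
        if (len & DMA_PAD_MASK)
            return false;
        return true;
    }

    int main(void)
    {
        static _Alignas(32) unsigned char buf[4096];

        printf("aligned, 2048 bytes: %s\n", dma_safe(buf, 2048) ? "DMA" : "PIO");
        printf("offset by one byte:  %s\n", dma_safe(buf + 1, 2048) ? "DMA" : "PIO");
        printf("length 100 bytes:    %s\n", dma_safe(buf, 100) ? "DMA" : "PIO");
        return 0;
    }
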
-
-static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
- sector_t block)
-{
- struct ide_cmd cmd;
- int uptodate = 0;
- unsigned int nsectors;
-
- ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
- rq->cmd[0], (unsigned long long)block);
-
- if (drive->debug_mask & IDE_DBG_RQ)
- blk_dump_rq_flags(rq, "ide_cd_do_request");
-
- switch (req_op(rq)) {
- default:
- if (cdrom_start_rw(drive, rq) == ide_stopped)
- goto out_end;
- break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- handle_pc:
- if (!rq->timeout)
- rq->timeout = ATAPI_WAIT_PC;
- cdrom_do_block_pc(drive, rq);
- break;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- switch (ide_req(rq)->type) {
- case ATA_PRIV_MISC:
- /* right now this can only be a reset... */
- uptodate = 1;
- goto out_end;
- case ATA_PRIV_SENSE:
- case ATA_PRIV_PC:
- goto handle_pc;
- default:
- BUG();
- }
- }
-
- /* prepare sense request for this command */
- ide_prep_sense(drive, rq);
-
- memset(&cmd, 0, sizeof(cmd));
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- cmd.rq = rq;
-
- if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
- ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
- ide_map_sg(drive, &cmd);
- }
-
- return ide_issue_pc(drive, &cmd);
-out_end:
- nsectors = blk_rq_sectors(rq);
-
- if (nsectors == 0)
- nsectors = 1;
-
- ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, nsectors << 9);
-
- return ide_stopped;
-}
-
-/*
- * Ioctl handling.
- *
- * Routines which queue packet commands take as a final argument a pointer to a
- * request_sense struct. If execution of the command results in an error with a
- * CHECK CONDITION status, this structure will be filled with the results of the
- * subsequent request sense command. The pointer can also be NULL, in which case
- * no sense information is returned.
- */
-static void msf_from_bcd(struct atapi_msf *msf)
-{
- msf->minute = bcd2bin(msf->minute);
- msf->second = bcd2bin(msf->second);
- msf->frame = bcd2bin(msf->frame);
-}
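
Drives flagged IDE_AFLAG_TOCADDR_AS_BCD return TOC addresses as packed BCD, which the helper above converts field by field before the MSF triple becomes an LBA. A self-contained sketch of both conversions; the 150-frame offset is the standard 2-second lead-in that the kernel's msf_to_lba() helper subtracts as well:

    #include <stdio.h>

    /* Packed BCD <-> binary, equivalent to the kernel's bcd2bin()/bin2bcd(). */
    static unsigned char bcd2bin(unsigned char v) { return (v >> 4) * 10 + (v & 0x0f); }
    static unsigned char bin2bcd(unsigned char v) { return ((v / 10) << 4) | (v % 10); }

    /* MSF -> LBA: 75 frames per second, minus the 150-frame (2 s) lead-in. */
    static long msf_to_lba(int m, int s, int f)
    {
        return ((long)m * 60 + s) * 75 + f - 150;
    }

    int main(void)
    {
        unsigned char bcd_minute = 0x59;        /* BCD encoding of 59 */

        printf("bcd 0x%02x -> %u -> bcd 0x%02x\n", bcd_minute,
               (unsigned int)bcd2bin(bcd_minute), bin2bcd(bcd2bin(bcd_minute)));
        printf("00:02:00 -> LBA %ld\n", msf_to_lba(0, 2, 0));   /* 0 */
        return 0;
    }
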
-
-int cdrom_check_status(ide_drive_t *drive, struct scsi_sense_hdr *sshdr)
-{
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi;
- unsigned char cmd[BLK_MAX_CDB];
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (!info)
- return -EIO;
-
- cdi = &info->devinfo;
-
- memset(cmd, 0, BLK_MAX_CDB);
- cmd[0] = GPCMD_TEST_UNIT_READY;
-
- /*
- * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs
- * instead of supporting the LOAD_UNLOAD opcode.
- */
- cmd[7] = cdi->sanyo_slot % 3;
-
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sshdr, 0, RQF_QUIET);
-}
-
-static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
- unsigned long *sectors_per_frame)
-{
- struct {
- __be32 lba;
- __be32 blocklen;
- } capbuf;
-
- int stat;
- unsigned char cmd[BLK_MAX_CDB];
- unsigned len = sizeof(capbuf);
- u32 blocklen;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- memset(cmd, 0, BLK_MAX_CDB);
- cmd[0] = GPCMD_READ_CDVD_CAPACITY;
-
- stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, NULL, 0,
- RQF_QUIET);
- if (stat)
- return stat;
-
- /*
- * Sanity check the given block size, in so far as making
- * sure the sectors_per_frame we give to the caller won't
- * end up being bogus.
- */
- blocklen = be32_to_cpu(capbuf.blocklen);
- blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT;
- switch (blocklen) {
- case 512:
- case 1024:
- case 2048:
- case 4096:
- break;
- default:
- printk_once(KERN_ERR PFX "%s: weird block size %u; "
- "setting default block size to 2048\n",
- drive->name, blocklen);
- blocklen = 2048;
- break;
- }
-
- *capacity = 1 + be32_to_cpu(capbuf.lba);
- *sectors_per_frame = blocklen >> SECTOR_SHIFT;
-
- ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu",
- *capacity, *sectors_per_frame);
-
- return 0;
-}
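
cdrom_read_capacity() decodes a big-endian last-LBA and block length, truncates the block length to a sector multiple, falls back to 2048 bytes when the value looks bogus, and reports last LBA + 1 frames. The same decode as a standalone sketch over an invented payload:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* Decode the 8-byte READ CD/DVD CAPACITY payload (fields are big-endian). */
    static void decode_capacity(const uint8_t buf[8], unsigned long *capacity,
                                unsigned long *sectors_per_frame)
    {
        uint32_t lba = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
                       ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
        uint32_t blocklen = ((uint32_t)buf[4] << 24) | ((uint32_t)buf[5] << 16) |
                            ((uint32_t)buf[6] << 8)  |  (uint32_t)buf[7];

        /* Drop any sub-sector remainder, then fall back to 2048 if it looks bogus. */
        blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT;
        if (blocklen != 512 && blocklen != 1024 &&
            blocklen != 2048 && blocklen != 4096)
            blocklen = 2048;

        *capacity = 1 + lba;             /* LBAs are 0-based */
        *sectors_per_frame = blocklen >> SECTOR_SHIFT;
    }

    int main(void)
    {
        /* Example payload: last LBA 0x00035FFF, block length 2048. */
        const uint8_t buf[8] = { 0x00, 0x03, 0x5f, 0xff, 0x00, 0x00, 0x08, 0x00 };
        unsigned long capacity, spf;

        decode_capacity(buf, &capacity, &spf);
        printf("capacity=%lu frames, %lu sectors/frame\n", capacity, spf);
        return 0;
    }
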
-
-static int ide_cdrom_read_tocentry(ide_drive_t *drive, int trackno,
- int msf_flag, int format, char *buf, int buflen)
-{
- unsigned char cmd[BLK_MAX_CDB];
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cmd[6] = trackno;
- cmd[7] = (buflen >> 8);
- cmd[8] = (buflen & 0xff);
- cmd[9] = (format << 6);
-
- if (msf_flag)
- cmd[1] = 2;
-
- return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, NULL, 0, RQF_QUIET);
-}
-
-/* Try to read the entire TOC for the disk into our internal buffer. */
-int ide_cd_read_toc(ide_drive_t *drive)
-{
- int stat, ntracks, i;
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi = &info->devinfo;
- struct atapi_toc *toc = info->toc;
- struct {
- struct atapi_toc_header hdr;
- struct atapi_toc_entry ent;
- } ms_tmp;
- long last_written;
- unsigned long sectors_per_frame = SECTORS_PER_FRAME;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (toc == NULL) {
- /* try to allocate space */
- toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
- if (toc == NULL) {
- printk(KERN_ERR PFX "%s: No cdrom TOC buffer!\n",
- drive->name);
- return -ENOMEM;
- }
- info->toc = toc;
- }
-
- /*
- * Check to see if the existing data is still valid. If it is,
- * just return.
- */
- (void) cdrom_check_status(drive, NULL);
-
- if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
- return 0;
-
- /* try to get the total cdrom capacity and sector size */
- stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame);
- if (stat)
- toc->capacity = 0x1fffff;
-
- set_capacity(info->disk, toc->capacity * sectors_per_frame);
- /* save a private copy of the TOC capacity for error handling */
- drive->probed_capacity = toc->capacity * sectors_per_frame;
-
- blk_queue_logical_block_size(drive->queue,
- sectors_per_frame << SECTOR_SHIFT);
-
- /* first read just the header, so we know how long the TOC is */
- stat = ide_cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
- sizeof(struct atapi_toc_header));
- if (stat)
- return stat;
-
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
- toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
- }
-
- ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
- if (ntracks <= 0)
- return -EIO;
- if (ntracks > MAX_TRACKS)
- ntracks = MAX_TRACKS;
-
- /* now read the whole schmeer */
- stat = ide_cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
- (char *)&toc->hdr,
- sizeof(struct atapi_toc_header) +
- (ntracks + 1) *
- sizeof(struct atapi_toc_entry));
-
- if (stat && toc->hdr.first_track > 1) {
- /*
-		 * CDs with only CDI tracks don't have any TOC entries; despite
-		 * this, the returned values are
-		 * first_track == last_track == number of CDI tracks + 1,
-		 * making this case indistinguishable from the same layout
-		 * plus an additional audio track. If we get an error for the
- * regular case, we assume a CDI without additional audio
- * tracks. In this case the readable TOC is empty (CDI tracks
- * are not included) and only holds the Leadout entry.
- *
- * Heiko Eißfeldt.
- */
- ntracks = 0;
- stat = ide_cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
- (char *)&toc->hdr,
- sizeof(struct atapi_toc_header) +
- (ntracks + 1) *
- sizeof(struct atapi_toc_entry));
- if (stat)
- return stat;
-
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT);
- toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT);
- } else {
- toc->hdr.first_track = CDROM_LEADOUT;
- toc->hdr.last_track = CDROM_LEADOUT;
- }
- }
-
- if (stat)
- return stat;
-
- toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
-
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
- toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
- toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
- }
-
- for (i = 0; i <= ntracks; i++) {
- if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
- if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
- toc->ent[i].track = bcd2bin(toc->ent[i].track);
- msf_from_bcd(&toc->ent[i].addr.msf);
- }
- toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
- toc->ent[i].addr.msf.second,
- toc->ent[i].addr.msf.frame);
- }
-
- if (toc->hdr.first_track != CDROM_LEADOUT) {
- /* read the multisession information */
- stat = ide_cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
- sizeof(ms_tmp));
- if (stat)
- return stat;
-
- toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
- } else {
- ms_tmp.hdr.last_track = CDROM_LEADOUT;
- ms_tmp.hdr.first_track = ms_tmp.hdr.last_track;
- toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
- }
-
- if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
- /* re-read multisession information using MSF format */
- stat = ide_cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
- sizeof(ms_tmp));
- if (stat)
- return stat;
-
- msf_from_bcd(&ms_tmp.ent.addr.msf);
- toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
- ms_tmp.ent.addr.msf.second,
- ms_tmp.ent.addr.msf.frame);
- }
-
- toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
-
- /* now try to get the total cdrom capacity */
- stat = cdrom_get_last_written(cdi, &last_written);
- if (!stat && (last_written > toc->capacity)) {
- toc->capacity = last_written;
- set_capacity(info->disk, toc->capacity * sectors_per_frame);
- drive->probed_capacity = toc->capacity * sectors_per_frame;
- }
-
- /* Remember that we've read this stuff. */
- drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
-
- return 0;
-}
-
-int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
-{
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi = &info->devinfo;
- struct packet_command cgc;
- int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
- size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
-
- init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
- do {
- /* we seem to get stat=0x01,err=0x00 the first time (??) */
- stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (!stat)
- break;
- } while (--attempts);
- return stat;
-}
-
-void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
-{
- struct cdrom_info *cd = drive->driver_data;
- u16 curspeed, maxspeed;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
- curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
- maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
- } else {
- curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
- maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
- }
-
- ide_debug_log(IDE_DBG_PROBE, "curspeed: %u, maxspeed: %u",
- curspeed, maxspeed);
-
- cd->current_speed = DIV_ROUND_CLOSEST(curspeed, 176);
- cd->max_speed = DIV_ROUND_CLOSEST(maxspeed, 176);
-}
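
The capabilities page reports current and maximum speed in kB/s; dividing by 176 with rounding (1x audio rate is roughly 176.4 kB/s) gives the familiar "NX" figure shown in the probe message. A small sketch of the conversion, with a local stand-in for DIV_ROUND_CLOSEST():

    #include <stdio.h>

    /* Round-to-nearest integer division, like the kernel's DIV_ROUND_CLOSEST(). */
    static unsigned int div_round_closest(unsigned int x, unsigned int divisor)
    {
        return (x + divisor / 2) / divisor;
    }

    /* The capabilities page reports speeds in kB/s; 1x CD speed is ~176.4 kB/s. */
    static unsigned int kbps_to_x(unsigned int kbps)
    {
        return div_round_closest(kbps, 176);
    }

    int main(void)
    {
        printf("706 kB/s  -> %uX\n", kbps_to_x(706));    /* a 4X drive */
        printf("8467 kB/s -> %uX\n", kbps_to_x(8467));   /* a 48X drive */
        return 0;
    }
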
-
-#define IDE_CD_CAPABILITIES \
- (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | \
- CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | \
- CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R | \
- CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \
- CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM)
-
-static const struct cdrom_device_ops ide_cdrom_dops = {
- .open = ide_cdrom_open_real,
- .release = ide_cdrom_release_real,
- .drive_status = ide_cdrom_drive_status,
- .check_events = ide_cdrom_check_events_real,
- .tray_move = ide_cdrom_tray_move,
- .lock_door = ide_cdrom_lock_door,
- .select_speed = ide_cdrom_select_speed,
- .get_last_session = ide_cdrom_get_last_session,
- .get_mcn = ide_cdrom_get_mcn,
- .reset = ide_cdrom_reset,
- .audio_ioctl = ide_cdrom_audio_ioctl,
- .capability = IDE_CD_CAPABILITIES,
- .generic_packet = ide_cdrom_packet,
-};
-
-static int ide_cdrom_register(ide_drive_t *drive, int nslots)
-{
- struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *devinfo = &info->devinfo;
-
- ide_debug_log(IDE_DBG_PROBE, "nslots: %d", nslots);
-
- devinfo->ops = &ide_cdrom_dops;
- devinfo->speed = info->current_speed;
- devinfo->capacity = nslots;
- devinfo->handle = drive;
- strcpy(devinfo->name, drive->name);
-
- if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
- devinfo->mask |= CDC_SELECT_SPEED;
-
- return register_cdrom(info->disk, devinfo);
-}
-
-static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_device_info *cdi = &cd->devinfo;
- u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
- mechtype_t mechtype;
- int nslots = 1;
-
- ide_debug_log(IDE_DBG_PROBE, "media: 0x%x, atapi_flags: 0x%lx",
- drive->media, drive->atapi_flags);
-
- cdi->mask = (CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R |
- CDC_DVD_RAM | CDC_SELECT_DISC | CDC_PLAY_AUDIO |
- CDC_MO_DRIVE | CDC_RAM);
-
- if (drive->media == ide_optical) {
- cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM);
- printk(KERN_ERR PFX "%s: ATAPI magneto-optical drive\n",
- drive->name);
- return nslots;
- }
-
- if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
- drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
- cdi->mask &= ~CDC_PLAY_AUDIO;
- return nslots;
- }
-
- /*
-	 * We have to cheat a little here. The packet will eventually be queued
- * with ide_cdrom_packet(), which extracts the drive from cdi->handle.
- * Since this device hasn't been registered with the Uniform layer yet,
- * it can't do this. Same goes for cdi->ops.
- */
- cdi->handle = drive;
- cdi->ops = &ide_cdrom_dops;
-
- if (ide_cdrom_get_capabilities(drive, buf))
- return 0;
-
- if ((buf[8 + 6] & 0x01) == 0)
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
- if (buf[8 + 6] & 0x08)
- drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
- if (buf[8 + 3] & 0x01)
- cdi->mask &= ~CDC_CD_R;
- if (buf[8 + 3] & 0x02)
- cdi->mask &= ~(CDC_CD_RW | CDC_RAM);
- if (buf[8 + 2] & 0x38)
- cdi->mask &= ~CDC_DVD;
- if (buf[8 + 3] & 0x20)
- cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
- if (buf[8 + 3] & 0x10)
- cdi->mask &= ~CDC_DVD_R;
- if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
- cdi->mask &= ~CDC_PLAY_AUDIO;
-
- mechtype = buf[8 + 6] >> 5;
- if (mechtype == mechtype_caddy ||
- mechtype == mechtype_popup ||
- (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE))
- cdi->mask |= CDC_CLOSE_TRAY;
-
- if (cdi->sanyo_slot > 0) {
- cdi->mask &= ~CDC_SELECT_DISC;
- nslots = 3;
- } else if (mechtype == mechtype_individual_changer ||
- mechtype == mechtype_cartridge_changer) {
- nslots = cdrom_number_of_slots(cdi);
- if (nslots > 1)
- cdi->mask &= ~CDC_SELECT_DISC;
- }
-
- ide_cdrom_update_speed(drive, buf);
-
- printk(KERN_INFO PFX "%s: ATAPI", drive->name);
-
- /* don't print speed if the drive reported 0 */
- if (cd->max_speed)
- printk(KERN_CONT " %dX", cd->max_speed);
-
- printk(KERN_CONT " %s", (cdi->mask & CDC_DVD) ? "CD-ROM" : "DVD-ROM");
-
- if ((cdi->mask & CDC_DVD_R) == 0 || (cdi->mask & CDC_DVD_RAM) == 0)
- printk(KERN_CONT " DVD%s%s",
- (cdi->mask & CDC_DVD_R) ? "" : "-R",
- (cdi->mask & CDC_DVD_RAM) ? "" : "/RAM");
-
- if ((cdi->mask & CDC_CD_R) == 0 || (cdi->mask & CDC_CD_RW) == 0)
- printk(KERN_CONT " CD%s%s",
- (cdi->mask & CDC_CD_R) ? "" : "-R",
- (cdi->mask & CDC_CD_RW) ? "" : "/RW");
-
- if ((cdi->mask & CDC_SELECT_DISC) == 0)
- printk(KERN_CONT " changer w/%d slots", nslots);
- else
- printk(KERN_CONT " drive");
-
- printk(KERN_CONT ", %dkB Cache\n",
- be16_to_cpup((__be16 *)&buf[8 + 12]));
-
- return nslots;
-}
-
-struct cd_list_entry {
- const char *id_model;
- const char *id_firmware;
- unsigned int cd_flags;
-};
-
-#ifdef CONFIG_IDE_PROC_FS
-static sector_t ide_cdrom_capacity(ide_drive_t *drive)
-{
- unsigned long capacity, sectors_per_frame;
-
- if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame))
- return 0;
-
- return capacity * sectors_per_frame;
-}
-
-static int idecd_capacity_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = m->private;
-
- seq_printf(m, "%llu\n", (long long)ide_cdrom_capacity(drive));
- return 0;
-}
-
-static ide_proc_entry_t idecd_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, idecd_capacity_proc_show },
- {}
-};
-
-static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
-{
- return idecd_proc;
-}
-
-static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive)
-{
- return NULL;
-}
-#endif
-
-static const struct cd_list_entry ide_cd_quirks_list[] = {
- /* SCR-3231 doesn't support the SET_CD_SPEED command. */
- { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT },
- /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
- { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
- IDE_AFLAG_PRE_ATAPI12, },
- /* Vertos 300, some versions of this drive like to talk BCD. */
- { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, },
- /* Vertos 600 ESD. */
- { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, },
- /*
- * Sanyo 3 CD changer uses a non-standard command for CD changing
- * (by default standard ATAPI support for CD changers is used).
- */
- { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD },
- { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD },
- { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD },
- /* Stingray 8X CD-ROM. */
- { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
- /*
- * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length
- * mode sense page capabilities size, but older drives break.
- */
- { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
- { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
- /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
- { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS },
- /*
- * Some drives used by Apple don't advertise audio play
-	 * but they do support reading TOC & audio data.
- */
- { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE },
- { "TEAC CD-ROM CD-224E", NULL, IDE_AFLAG_NO_AUTOCLOSE },
- { NULL, NULL, 0 }
-};
-
-static unsigned int ide_cd_flags(u16 *id)
-{
- const struct cd_list_entry *cle = ide_cd_quirks_list;
-
- while (cle->id_model) {
- if (strcmp(cle->id_model, (char *)&id[ATA_ID_PROD]) == 0 &&
- (cle->id_firmware == NULL ||
- strstr((char *)&id[ATA_ID_FW_REV], cle->id_firmware)))
- return cle->cd_flags;
- cle++;
- }
-
- return 0;
-}
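
ide_cd_flags() matches the ATA model string exactly and, when a firmware string is present in the table, requires it to appear as a substring of the firmware revision. The same lookup as a standalone sketch over an invented quirk table:

    #include <stdio.h>
    #include <string.h>

    struct cd_quirk {
        const char *id_model;
        const char *id_firmware;        /* NULL matches any firmware */
        unsigned int cd_flags;
    };

    /* Hypothetical entries, just to exercise the lookup. */
    static const struct cd_quirk quirks[] = {
        { "EXAMPLE CD-ROM X1", NULL,  0x01 },
        { "EXAMPLE CD-ROM X2", "1.0", 0x02 },
        { NULL, NULL, 0 }
    };

    /* Exact model match plus optional firmware substring match. */
    static unsigned int lookup_quirks(const char *model, const char *fw_rev)
    {
        const struct cd_quirk *q;

        for (q = quirks; q->id_model; q++)
            if (strcmp(q->id_model, model) == 0 &&
                (q->id_firmware == NULL || strstr(fw_rev, q->id_firmware)))
                return q->cd_flags;
        return 0;
    }

    int main(void)
    {
        printf("flags: 0x%x\n", lookup_quirks("EXAMPLE CD-ROM X2", "1.05"));
        printf("flags: 0x%x\n", lookup_quirks("UNKNOWN DRIVE", "2.00"));
        return 0;
    }
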
-
-static int ide_cdrom_setup(ide_drive_t *drive)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_device_info *cdi = &cd->devinfo;
- struct request_queue *q = drive->queue;
- u16 *id = drive->id;
- char *fw_rev = (char *)&id[ATA_ID_FW_REV];
- int nslots;
-
- ide_debug_log(IDE_DBG_PROBE, "enter");
-
- drive->prep_rq = ide_cdrom_prep_rq;
- blk_queue_dma_alignment(q, 31);
- blk_queue_update_dma_pad(q, 15);
-
- drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
- drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
-
- if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
- fw_rev[4] == '1' && fw_rev[6] <= '2')
- drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
- IDE_AFLAG_TOCADDR_AS_BCD);
- else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
- fw_rev[4] == '1' && fw_rev[6] <= '2')
- drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
- else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
- /* 3 => use CD in slot 0 */
- cdi->sanyo_slot = 3;
-
- nslots = ide_cdrom_probe_capabilities(drive);
-
- blk_queue_logical_block_size(q, CD_FRAMESIZE);
-
- if (ide_cdrom_register(drive, nslots)) {
- printk(KERN_ERR PFX "%s: %s failed to register device with the"
- " cdrom driver.\n", drive->name, __func__);
- cd->devinfo.handle = NULL;
- return 1;
- }
-
- ide_proc_register_driver(drive, cd->driver);
- return 0;
-}
-
-static void ide_cd_remove(ide_drive_t *drive)
-{
- struct cdrom_info *info = drive->driver_data;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- ide_proc_unregister_driver(drive, info->driver);
- device_del(&info->dev);
- del_gendisk(info->disk);
-
- mutex_lock(&idecd_ref_mutex);
- put_device(&info->dev);
- mutex_unlock(&idecd_ref_mutex);
-}
-
-static void ide_cd_release(struct device *dev)
-{
- struct cdrom_info *info = to_ide_drv(dev, cdrom_info);
- struct cdrom_device_info *devinfo = &info->devinfo;
- ide_drive_t *drive = info->drive;
- struct gendisk *g = info->disk;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- kfree(info->toc);
- if (devinfo->handle == drive)
- unregister_cdrom(devinfo);
- drive->driver_data = NULL;
- drive->prep_rq = NULL;
- g->private_data = NULL;
- put_disk(g);
- kfree(info);
-}
-
-static int ide_cd_probe(ide_drive_t *);
-
-static struct ide_driver ide_cdrom_driver = {
- .gen_driver = {
- .owner = THIS_MODULE,
- .name = "ide-cdrom",
- .bus = &ide_bus_type,
- },
- .probe = ide_cd_probe,
- .remove = ide_cd_remove,
- .version = IDECD_VERSION,
- .do_request = ide_cd_do_request,
-#ifdef CONFIG_IDE_PROC_FS
- .proc_entries = ide_cd_proc_entries,
- .proc_devsets = ide_cd_proc_devsets,
-#endif
-};
-
-static int idecd_open(struct block_device *bdev, fmode_t mode)
-{
- struct cdrom_info *info;
- int rc = -ENXIO;
-
- if (bdev_check_media_change(bdev)) {
- info = ide_drv_g(bdev->bd_disk, cdrom_info);
-
- ide_cd_read_toc(info->drive);
- }
-
- mutex_lock(&ide_cd_mutex);
- info = ide_cd_get(bdev->bd_disk);
- if (!info)
- goto out;
-
- rc = cdrom_open(&info->devinfo, bdev, mode);
- if (rc < 0)
- ide_cd_put(info);
-out:
- mutex_unlock(&ide_cd_mutex);
- return rc;
-}
-
-static void idecd_release(struct gendisk *disk, fmode_t mode)
-{
- struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
-
- mutex_lock(&ide_cd_mutex);
- cdrom_release(&info->devinfo, mode);
-
- ide_cd_put(info);
- mutex_unlock(&ide_cd_mutex);
-}
-
-static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg)
-{
- struct packet_command cgc;
- char buffer[16];
- int stat;
- char spindown;
-
- if (copy_from_user(&spindown, (void __user *)arg, sizeof(char)))
- return -EFAULT;
-
- init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
-
- stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
- if (stat)
- return stat;
-
- buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
- return cdrom_mode_select(cdi, &cgc);
-}
-
-static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
-{
- struct packet_command cgc;
- char buffer[16];
- int stat;
- char spindown;
-
- init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
-
- stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
- if (stat)
- return stat;
-
- spindown = buffer[11] & 0x0f;
- if (copy_to_user((void __user *)arg, &spindown, sizeof(char)))
- return -EFAULT;
- return 0;
-}
-
-static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
- int err;
-
- switch (cmd) {
- case CDROMSETSPINDOWN:
- return idecd_set_spindown(&info->devinfo, arg);
- case CDROMGETSPINDOWN:
- return idecd_get_spindown(&info->devinfo, arg);
- default:
- break;
- }
-
- err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
- if (err == -EINVAL)
- err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg);
-
- return err;
-}
-
-static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ide_cd_mutex);
- ret = idecd_locked_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ide_cd_mutex);
-
- return ret;
-}
-
-static int idecd_locked_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
- void __user *argp = compat_ptr(arg);
- int err;
-
- switch (cmd) {
- case CDROMSETSPINDOWN:
- return idecd_set_spindown(&info->devinfo, (unsigned long)argp);
- case CDROMGETSPINDOWN:
- return idecd_get_spindown(&info->devinfo, (unsigned long)argp);
- default:
- break;
- }
-
- err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
- if (err == -EINVAL)
- err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd,
- (unsigned long)argp);
-
- return err;
-}
-
-static int idecd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ide_cd_mutex);
- ret = idecd_locked_compat_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ide_cd_mutex);
-
- return ret;
-}
-
-static unsigned int idecd_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
- return cdrom_check_events(&info->devinfo, clearing);
-}
-
-static const struct block_device_operations idecd_ops = {
- .owner = THIS_MODULE,
- .open = idecd_open,
- .release = idecd_release,
- .ioctl = idecd_ioctl,
- .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
- idecd_compat_ioctl : NULL,
- .check_events = idecd_check_events,
-};
-
-/* module options */
-static unsigned long debug_mask;
-module_param(debug_mask, ulong, 0644);
-
-MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
-
-static int ide_cd_probe(ide_drive_t *drive)
-{
- struct cdrom_info *info;
- struct gendisk *g;
-
- ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x",
- drive->driver_req, drive->media);
-
- if (!strstr("ide-cdrom", drive->driver_req))
- goto failed;
-
- if (drive->media != ide_cdrom && drive->media != ide_optical)
- goto failed;
-
- drive->debug_mask = debug_mask;
- drive->irq_handler = cdrom_newpc_intr;
-
- info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
- if (info == NULL) {
- printk(KERN_ERR PFX "%s: Can't allocate a cdrom structure\n",
- drive->name);
- goto failed;
- }
-
- g = alloc_disk(1 << PARTN_BITS);
- if (!g)
- goto out_free_cd;
-
- ide_init_disk(g, drive);
-
- info->dev.parent = &drive->gendev;
- info->dev.release = ide_cd_release;
- dev_set_name(&info->dev, "%s", dev_name(&drive->gendev));
-
- if (device_register(&info->dev))
- goto out_free_disk;
-
- info->drive = drive;
- info->driver = &ide_cdrom_driver;
- info->disk = g;
-
- g->private_data = &info->driver;
-
- drive->driver_data = info;
-
- g->minors = 1;
- g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
- if (ide_cdrom_setup(drive)) {
- put_device(&info->dev);
- goto failed;
- }
-
- ide_cd_read_toc(drive);
- g->fops = &idecd_ops;
- g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
- device_add_disk(&drive->gendev, g, NULL);
- return 0;
-
-out_free_disk:
- put_disk(g);
-out_free_cd:
- kfree(info);
-failed:
- return -ENODEV;
-}
-
-static void __exit ide_cdrom_exit(void)
-{
- driver_unregister(&ide_cdrom_driver.gen_driver);
-}
-
-static int __init ide_cdrom_init(void)
-{
- printk(KERN_INFO DRV_NAME " driver " IDECD_VERSION "\n");
- return driver_register(&ide_cdrom_driver.gen_driver);
-}
-
-MODULE_ALIAS("ide:*m-cdrom*");
-MODULE_ALIAS("ide-cd");
-module_init(ide_cdrom_init);
-module_exit(ide_cdrom_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
deleted file mode 100644
index a69dc7f61c4d..000000000000
--- a/drivers/ide/ide-cd.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 1996-98 Erik Andersen
- * Copyright (C) 1998-2000 Jens Axboe
- */
-#ifndef _IDE_CD_H
-#define _IDE_CD_H
-
-#include <linux/cdrom.h>
-#include <asm/byteorder.h>
-
-#define IDECD_DEBUG_LOG 0
-
-#if IDECD_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
-#define ATAPI_WAIT_WRITE_BUSY (10 * HZ)
-
-/************************************************************************/
-
-#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT)
-#define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32)
-
-/* Capabilities Page size including 8 bytes of Mode Page Header */
-#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
-#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
-
-/* Structure of a MSF cdrom address. */
-struct atapi_msf {
- u8 reserved;
- u8 minute;
- u8 second;
- u8 frame;
-};
-
-/* Space to hold the disk TOC. */
-#define MAX_TRACKS 99
-struct atapi_toc_header {
- unsigned short toc_length;
- u8 first_track;
- u8 last_track;
-};
-
-struct atapi_toc_entry {
- u8 reserved1;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u8 adr : 4;
- u8 control : 4;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 control : 4;
- u8 adr : 4;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 track;
- u8 reserved2;
- union {
- unsigned lba;
- struct atapi_msf msf;
- } addr;
-};
-
-struct atapi_toc {
- int last_session_lba;
- int xa_flag;
- unsigned long capacity;
- struct atapi_toc_header hdr;
- struct atapi_toc_entry ent[MAX_TRACKS+1];
- /* One extra for the leadout. */
-};
-
-/* Extra per-device info for cdrom drives. */
-struct cdrom_info {
- ide_drive_t *drive;
- struct ide_driver *driver;
- struct gendisk *disk;
- struct device dev;
-
- /* Buffer for table of contents. NULL if we haven't allocated
- a TOC buffer for this device yet. */
-
- struct atapi_toc *toc;
-
- u8 max_speed; /* Max speed of the drive. */
- u8 current_speed; /* Current speed of the drive. */
-
- /* Per-device info needed by cdrom.c generic driver. */
- struct cdrom_device_info devinfo;
-
- unsigned long write_timeout;
-};
-
-/* ide-cd_verbose.c */
-void ide_cd_log_error(const char *, struct request *, struct request_sense *);
-
-/* ide-cd.c functions used by ide-cd_ioctl.c */
-int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
- unsigned *, struct scsi_sense_hdr *, int, req_flags_t);
-int ide_cd_read_toc(ide_drive_t *);
-int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
-void ide_cdrom_update_speed(ide_drive_t *, u8 *);
-int cdrom_check_status(ide_drive_t *, struct scsi_sense_hdr *);
-
-/* ide-cd_ioctl.c */
-int ide_cdrom_open_real(struct cdrom_device_info *, int);
-void ide_cdrom_release_real(struct cdrom_device_info *);
-int ide_cdrom_drive_status(struct cdrom_device_info *, int);
-unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *,
- unsigned int clearing, int slot_nr);
-int ide_cdrom_tray_move(struct cdrom_device_info *, int);
-int ide_cdrom_lock_door(struct cdrom_device_info *, int);
-int ide_cdrom_select_speed(struct cdrom_device_info *, int);
-int ide_cdrom_get_last_session(struct cdrom_device_info *,
- struct cdrom_multisession *);
-int ide_cdrom_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
-int ide_cdrom_reset(struct cdrom_device_info *cdi);
-int ide_cdrom_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
-int ide_cdrom_packet(struct cdrom_device_info *, struct packet_command *);
-
-#endif /* _IDE_CD_H */
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
deleted file mode 100644
index 011eab9c69b7..000000000000
--- a/drivers/ide/ide-cd_ioctl.c
+++ /dev/null
@@ -1,468 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * cdrom.c IOCTLs handling for ide-cd driver.
- *
- * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
- * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
- * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
- */
-
-#include <linux/kernel.h>
-#include <linux/cdrom.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-#include <scsi/scsi.h>
-
-#include "ide-cd.h"
-
-/****************************************************************************
- * Other driver requests (open, close, check media change).
- */
-int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
-{
- return 0;
-}
-
-/*
- * Close down the device. Invalidate all cached blocks.
- */
-void ide_cdrom_release_real(struct cdrom_device_info *cdi)
-{
- ide_drive_t *drive = cdi->handle;
-
- if (!cdi->use_count)
- drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
-}
-
-/*
- * Add logic to try the GET_EVENT command first to check media and tray
- * status. This should be supported by newer CD-R/W and all DVD etc.
- * drives.
- */
-int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
-{
- ide_drive_t *drive = cdi->handle;
- struct media_event_desc med;
- struct scsi_sense_hdr sshdr;
- int stat;
-
- if (slot_nr != CDSL_CURRENT)
- return -EINVAL;
-
- stat = cdrom_check_status(drive, &sshdr);
- if (!stat || sshdr.sense_key == UNIT_ATTENTION)
- return CDS_DISC_OK;
-
- if (!cdrom_get_media_event(cdi, &med)) {
- if (med.media_present)
- return CDS_DISC_OK;
- else if (med.door_open)
- return CDS_TRAY_OPEN;
- else
- return CDS_NO_DISC;
- }
-
- if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04
- && sshdr.ascq == 0x04)
- return CDS_DISC_OK;
-
- /*
- * If not using Mt Fuji extended media tray reports,
- * just return TRAY_OPEN since ATAPI doesn't provide
- * any other way to detect this...
- */
- if (sshdr.sense_key == NOT_READY) {
- if (sshdr.asc == 0x3a && sshdr.ascq == 1)
- return CDS_NO_DISC;
- else
- return CDS_TRAY_OPEN;
- }
- return CDS_DRIVE_NOT_READY;
-}
-
-/*
- * ide-cd always generates media changed event if media is missing, which
- * makes it impossible to use for proper event reporting, so
- * DISK_EVENT_FLAG_UEVENT is cleared in disk->event_flags
- * and the following function is used only to trigger
- * revalidation and never propagated to userland.
- */
-unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
- unsigned int clearing, int slot_nr)
-{
- ide_drive_t *drive = cdi->handle;
- int retval;
-
- if (slot_nr == CDSL_CURRENT) {
- (void) cdrom_check_status(drive, NULL);
- retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
- drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
- return retval ? DISK_EVENT_MEDIA_CHANGE : 0;
- } else {
- return 0;
- }
-}
-
-/* Eject the disk if EJECTFLAG is 0.
- If EJECTFLAG is 1, try to reload the disk. */
-static
-int cdrom_eject(ide_drive_t *drive, int ejectflag)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_device_info *cdi = &cd->devinfo;
- char loej = 0x02;
- unsigned char cmd[BLK_MAX_CDB];
-
- if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
- return -EDRIVE_CANT_DO_THIS;
-
- /* reload fails on some drives, if the tray is locked */
- if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
- return 0;
-
- /* only tell drive to close tray if open, if it can do that */
- if (ejectflag && (cdi->mask & CDC_CLOSE_TRAY))
- loej = 0;
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_START_STOP_UNIT;
- cmd[4] = loej | (ejectflag != 0);
-
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
-}
-
-/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
-static
-int ide_cd_lockdoor(ide_drive_t *drive, int lockflag)
-{
- struct scsi_sense_hdr sshdr;
- int stat;
-
- /* If the drive cannot lock the door, just pretend. */
- if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
- stat = 0;
- } else {
- unsigned char cmd[BLK_MAX_CDB];
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cmd[4] = lockflag ? 1 : 0;
-
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
- &sshdr, 0, 0);
- }
-
- /* If we got an illegal field error, the drive
- probably cannot lock the door. */
- if (stat != 0 &&
- sshdr.sense_key == ILLEGAL_REQUEST &&
- (sshdr.asc == 0x24 || sshdr.asc == 0x20)) {
- printk(KERN_ERR "%s: door locking not supported\n",
- drive->name);
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
- stat = 0;
- }
-
- /* no medium, that's alright. */
- if (stat != 0 && sshdr.sense_key == NOT_READY && sshdr.asc == 0x3a)
- stat = 0;
-
- if (stat == 0) {
- if (lockflag)
- drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
- else
- drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
- }
-
- return stat;
-}
-
-int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
-{
- ide_drive_t *drive = cdi->handle;
-
- if (position) {
- int stat = ide_cd_lockdoor(drive, 0);
-
- if (stat)
- return stat;
- }
-
- return cdrom_eject(drive, !position);
-}
-
-int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
-{
- ide_drive_t *drive = cdi->handle;
-
- return ide_cd_lockdoor(drive, lock);
-}
-
-/*
- * ATAPI devices are free to select the speed you request or any slower
- * rate. :-( Requesting too fast a speed will _not_ produce an error.
- */
-int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
-{
- ide_drive_t *drive = cdi->handle;
- struct cdrom_info *cd = drive->driver_data;
- u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
- int stat;
- unsigned char cmd[BLK_MAX_CDB];
-
- if (speed == 0)
- speed = 0xffff; /* set to max */
- else
- speed *= 177; /* Nx to kbytes/s */
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_SET_SPEED;
- /* Read Drive speed in kbytes/second MSB/LSB */
- cmd[2] = (speed >> 8) & 0xff;
- cmd[3] = speed & 0xff;
- if ((cdi->mask & (CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) !=
- (CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) {
- /* Write Drive speed in kbytes/second MSB/LSB */
- cmd[4] = (speed >> 8) & 0xff;
- cmd[5] = speed & 0xff;
- }
-
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
-
- if (!ide_cdrom_get_capabilities(drive, buf)) {
- ide_cdrom_update_speed(drive, buf);
- cdi->speed = cd->current_speed;
- }
-
- return 0;
-}
-
-int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
- struct cdrom_multisession *ms_info)
-{
- struct atapi_toc *toc;
- ide_drive_t *drive = cdi->handle;
- struct cdrom_info *info = drive->driver_data;
- int ret;
-
- if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
- ret = ide_cd_read_toc(drive);
- if (ret)
- return ret;
- }
-
- toc = info->toc;
- ms_info->addr.lba = toc->last_session_lba;
- ms_info->xa_flag = toc->xa_flag;
-
- return 0;
-}
-
-int ide_cdrom_get_mcn(struct cdrom_device_info *cdi,
- struct cdrom_mcn *mcn_info)
-{
- ide_drive_t *drive = cdi->handle;
- int stat, mcnlen;
- char buf[24];
- unsigned char cmd[BLK_MAX_CDB];
- unsigned len = sizeof(buf);
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_READ_SUBCHANNEL;
- cmd[1] = 2; /* MSF addressing */
- cmd[2] = 0x40; /* request subQ data */
- cmd[3] = 2; /* format */
- cmd[8] = len;
-
- stat = ide_cd_queue_pc(drive, cmd, 0, buf, &len, NULL, 0, 0);
- if (stat)
- return stat;
-
- mcnlen = sizeof(mcn_info->medium_catalog_number) - 1;
- memcpy(mcn_info->medium_catalog_number, buf + 9, mcnlen);
- mcn_info->medium_catalog_number[mcnlen] = '\0';
-
- return 0;
-}
-
-int ide_cdrom_reset(struct cdrom_device_info *cdi)
-{
- ide_drive_t *drive = cdi->handle;
- struct cdrom_info *cd = drive->driver_data;
- struct request *rq;
- int ret;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- rq->rq_flags = RQF_QUIET;
- blk_execute_rq(cd->disk, rq, 0);
- ret = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
- /*
- * A reset will unlock the door. If it was previously locked,
- * lock it again.
- */
- if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
- (void)ide_cd_lockdoor(drive, 1);
-
- return ret;
-}
-
-static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
- struct atapi_toc_entry **ent)
-{
- struct cdrom_info *info = drive->driver_data;
- struct atapi_toc *toc = info->toc;
- int ntracks;
-
- /*
- * don't serve cached data, if the toc isn't valid
- */
- if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
- return -EINVAL;
-
- /* Check validity of requested track number. */
- ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
-
- if (toc->hdr.first_track == CDROM_LEADOUT)
- ntracks = 0;
-
- if (track == CDROM_LEADOUT)
- *ent = &toc->ent[ntracks];
- else if (track < toc->hdr.first_track || track > toc->hdr.last_track)
- return -EINVAL;
- else
- *ent = &toc->ent[track - toc->hdr.first_track];
-
- return 0;
-}
-
-static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
-{
- struct cdrom_ti *ti = arg;
- struct atapi_toc_entry *first_toc, *last_toc;
- unsigned long lba_start, lba_end;
- int stat;
- unsigned char cmd[BLK_MAX_CDB];
-
- stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
- if (stat)
- return stat;
-
- stat = ide_cd_get_toc_entry(drive, ti->cdti_trk1, &last_toc);
- if (stat)
- return stat;
-
- if (ti->cdti_trk1 != CDROM_LEADOUT)
- ++last_toc;
- lba_start = first_toc->addr.lba;
- lba_end = last_toc->addr.lba;
-
- if (lba_end <= lba_start)
- return -EINVAL;
-
- memset(cmd, 0, BLK_MAX_CDB);
-
- cmd[0] = GPCMD_PLAY_AUDIO_MSF;
- lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
- lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
-
- return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0);
-}
-
-static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
-{
- struct cdrom_info *cd = drive->driver_data;
- struct cdrom_tochdr *tochdr = arg;
- struct atapi_toc *toc;
- int stat;
-
- /* Make sure our saved TOC is valid. */
- stat = ide_cd_read_toc(drive);
- if (stat)
- return stat;
-
- toc = cd->toc;
- tochdr->cdth_trk0 = toc->hdr.first_track;
- tochdr->cdth_trk1 = toc->hdr.last_track;
-
- return 0;
-}
-
-static int ide_cd_read_tocentry(ide_drive_t *drive, void *arg)
-{
- struct cdrom_tocentry *tocentry = arg;
- struct atapi_toc_entry *toce;
- int stat;
-
- stat = ide_cd_get_toc_entry(drive, tocentry->cdte_track, &toce);
- if (stat)
- return stat;
-
- tocentry->cdte_ctrl = toce->control;
- tocentry->cdte_adr = toce->adr;
- if (tocentry->cdte_format == CDROM_MSF) {
- lba_to_msf(toce->addr.lba,
- &tocentry->cdte_addr.msf.minute,
- &tocentry->cdte_addr.msf.second,
- &tocentry->cdte_addr.msf.frame);
- } else
- tocentry->cdte_addr.lba = toce->addr.lba;
-
- return 0;
-}
-
-int ide_cdrom_audio_ioctl(struct cdrom_device_info *cdi,
- unsigned int cmd, void *arg)
-{
- ide_drive_t *drive = cdi->handle;
-
- switch (cmd) {
- /*
- * emulate PLAY_AUDIO_TI command with PLAY_AUDIO_10, since
- * atapi doesn't support it
- */
- case CDROMPLAYTRKIND:
- return ide_cd_fake_play_trkind(drive, arg);
- case CDROMREADTOCHDR:
- return ide_cd_read_tochdr(drive, arg);
- case CDROMREADTOCENTRY:
- return ide_cd_read_tocentry(drive, arg);
- default:
- return -EINVAL;
- }
-}
-
-/* the generic packet interface to cdrom.c */
-int ide_cdrom_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc)
-{
- ide_drive_t *drive = cdi->handle;
- req_flags_t flags = 0;
- unsigned len = cgc->buflen;
-
- if (cgc->timeout <= 0)
- cgc->timeout = ATAPI_WAIT_PC;
-
- /* here we queue the commands from the uniform CD-ROM
- layer. the packet must be complete, as we do not
- touch it at all. */
-
- if (cgc->sshdr)
- memset(cgc->sshdr, 0, sizeof(*cgc->sshdr));
-
- if (cgc->quiet)
- flags |= RQF_QUIET;
-
- cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
- cgc->data_direction == CGC_DATA_WRITE,
- cgc->buffer, &len,
- cgc->sshdr, cgc->timeout, flags);
- if (!cgc->stat)
- cgc->buflen -= len;
- return cgc->stat;
-}
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
deleted file mode 100644
index 5ecd5b2f03a3..000000000000
--- a/drivers/ide/ide-cd_verbose.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Verbose error logging for ATAPI CD/DVD devices.
- *
- * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
- * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
- * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
- */
-
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/cdrom.h>
-#include <linux/ide.h>
-#include <scsi/scsi.h>
-#include "ide-cd.h"
-
-#ifndef CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS
-void ide_cd_log_error(const char *name, struct request *failed_command,
- struct request_sense *sense)
-{
- /* Suppress printing unit attention and `in progress of becoming ready'
- errors when we're not being verbose. */
- if (sense->sense_key == UNIT_ATTENTION ||
- (sense->sense_key == NOT_READY && (sense->asc == 4 ||
- sense->asc == 0x3a)))
- return;
-
- printk(KERN_ERR "%s: error code: 0x%02x sense_key: 0x%02x "
- "asc: 0x%02x ascq: 0x%02x\n",
- name, sense->error_code, sense->sense_key,
- sense->asc, sense->ascq);
-}
-#else
-/* The generic packet command opcodes for CD/DVD Logical Units,
- * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-static const struct {
- unsigned short packet_command;
- const char * const text;
-} packet_command_texts[] = {
- { GPCMD_TEST_UNIT_READY, "Test Unit Ready" },
- { GPCMD_REQUEST_SENSE, "Request Sense" },
- { GPCMD_FORMAT_UNIT, "Format Unit" },
- { GPCMD_INQUIRY, "Inquiry" },
- { GPCMD_START_STOP_UNIT, "Start/Stop Unit" },
- { GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, "Prevent/Allow Medium Removal" },
- { GPCMD_READ_FORMAT_CAPACITIES, "Read Format Capacities" },
- { GPCMD_READ_CDVD_CAPACITY, "Read Cd/Dvd Capacity" },
- { GPCMD_READ_10, "Read 10" },
- { GPCMD_WRITE_10, "Write 10" },
- { GPCMD_SEEK, "Seek" },
- { GPCMD_WRITE_AND_VERIFY_10, "Write and Verify 10" },
- { GPCMD_VERIFY_10, "Verify 10" },
- { GPCMD_FLUSH_CACHE, "Flush Cache" },
- { GPCMD_READ_SUBCHANNEL, "Read Subchannel" },
- { GPCMD_READ_TOC_PMA_ATIP, "Read Table of Contents" },
- { GPCMD_READ_HEADER, "Read Header" },
- { GPCMD_PLAY_AUDIO_10, "Play Audio 10" },
- { GPCMD_GET_CONFIGURATION, "Get Configuration" },
- { GPCMD_PLAY_AUDIO_MSF, "Play Audio MSF" },
- { GPCMD_PLAYAUDIO_TI, "Play Audio TrackIndex" },
- { GPCMD_GET_EVENT_STATUS_NOTIFICATION,
- "Get Event Status Notification" },
- { GPCMD_PAUSE_RESUME, "Pause/Resume" },
- { GPCMD_STOP_PLAY_SCAN, "Stop Play/Scan" },
- { GPCMD_READ_DISC_INFO, "Read Disc Info" },
- { GPCMD_READ_TRACK_RZONE_INFO, "Read Track Rzone Info" },
- { GPCMD_RESERVE_RZONE_TRACK, "Reserve Rzone Track" },
- { GPCMD_SEND_OPC, "Send OPC" },
- { GPCMD_MODE_SELECT_10, "Mode Select 10" },
- { GPCMD_REPAIR_RZONE_TRACK, "Repair Rzone Track" },
- { GPCMD_MODE_SENSE_10, "Mode Sense 10" },
- { GPCMD_CLOSE_TRACK, "Close Track" },
- { GPCMD_BLANK, "Blank" },
- { GPCMD_SEND_EVENT, "Send Event" },
- { GPCMD_SEND_KEY, "Send Key" },
- { GPCMD_REPORT_KEY, "Report Key" },
- { GPCMD_LOAD_UNLOAD, "Load/Unload" },
- { GPCMD_SET_READ_AHEAD, "Set Read-ahead" },
- { GPCMD_READ_12, "Read 12" },
- { GPCMD_GET_PERFORMANCE, "Get Performance" },
- { GPCMD_SEND_DVD_STRUCTURE, "Send DVD Structure" },
- { GPCMD_READ_DVD_STRUCTURE, "Read DVD Structure" },
- { GPCMD_SET_STREAMING, "Set Streaming" },
- { GPCMD_READ_CD_MSF, "Read CD MSF" },
- { GPCMD_SCAN, "Scan" },
- { GPCMD_SET_SPEED, "Set Speed" },
- { GPCMD_PLAY_CD, "Play CD" },
- { GPCMD_MECHANISM_STATUS, "Mechanism Status" },
- { GPCMD_READ_CD, "Read CD" },
-};
-
-/* From Table 303 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-static const char * const sense_key_texts[16] = {
- "No sense data",
- "Recovered error",
- "Not ready",
- "Medium error",
- "Hardware error",
- "Illegal request",
- "Unit attention",
- "Data protect",
- "Blank check",
- "(reserved)",
- "(reserved)",
- "Aborted command",
- "(reserved)",
- "(reserved)",
- "Miscompare",
- "(reserved)",
-};
-
-/* From Table 304 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */
-static const struct {
- unsigned long asc_ascq;
- const char * const text;
-} sense_data_texts[] = {
- { 0x000000, "No additional sense information" },
- { 0x000011, "Play operation in progress" },
- { 0x000012, "Play operation paused" },
- { 0x000013, "Play operation successfully completed" },
- { 0x000014, "Play operation stopped due to error" },
- { 0x000015, "No current audio status to return" },
- { 0x010c0a, "Write error - padding blocks added" },
- { 0x011700, "Recovered data with no error correction applied" },
- { 0x011701, "Recovered data with retries" },
- { 0x011702, "Recovered data with positive head offset" },
- { 0x011703, "Recovered data with negative head offset" },
- { 0x011704, "Recovered data with retries and/or CIRC applied" },
- { 0x011705, "Recovered data using previous sector ID" },
- { 0x011800, "Recovered data with error correction applied" },
- { 0x011801, "Recovered data with error correction and retries applied"},
- { 0x011802, "Recovered data - the data was auto-reallocated" },
- { 0x011803, "Recovered data with CIRC" },
- { 0x011804, "Recovered data with L-EC" },
- { 0x015d00, "Failure prediction threshold exceeded"
- " - Predicted logical unit failure" },
- { 0x015d01, "Failure prediction threshold exceeded"
- " - Predicted media failure" },
- { 0x015dff, "Failure prediction threshold exceeded - False" },
- { 0x017301, "Power calibration area almost full" },
- { 0x020400, "Logical unit not ready - cause not reportable" },
- /* Following is misspelled in ATAPI 2.6, _and_ in Mt. Fuji */
- { 0x020401, "Logical unit not ready"
- " - in progress [sic] of becoming ready" },
- { 0x020402, "Logical unit not ready - initializing command required" },
- { 0x020403, "Logical unit not ready - manual intervention required" },
- { 0x020404, "Logical unit not ready - format in progress" },
- { 0x020407, "Logical unit not ready - operation in progress" },
- { 0x020408, "Logical unit not ready - long write in progress" },
- { 0x020600, "No reference position found (media may be upside down)" },
- { 0x023000, "Incompatible medium installed" },
- { 0x023a00, "Medium not present" },
- { 0x025300, "Media load or eject failed" },
- { 0x025700, "Unable to recover table of contents" },
- { 0x030300, "Peripheral device write fault" },
- { 0x030301, "No write current" },
- { 0x030302, "Excessive write errors" },
- { 0x030c00, "Write error" },
- { 0x030c01, "Write error - Recovered with auto reallocation" },
- { 0x030c02, "Write error - auto reallocation failed" },
- { 0x030c03, "Write error - recommend reassignment" },
- { 0x030c04, "Compression check miscompare error" },
- { 0x030c05, "Data expansion occurred during compress" },
- { 0x030c06, "Block not compressible" },
- { 0x030c07, "Write error - recovery needed" },
- { 0x030c08, "Write error - recovery failed" },
- { 0x030c09, "Write error - loss of streaming" },
- { 0x031100, "Unrecovered read error" },
- { 0x031106, "CIRC unrecovered error" },
- { 0x033101, "Format command failed" },
- { 0x033200, "No defect spare location available" },
- { 0x033201, "Defect list update failure" },
- { 0x035100, "Erase failure" },
- { 0x037200, "Session fixation error" },
- { 0x037201, "Session fixation error writin lead-in" },
- { 0x037202, "Session fixation error writin lead-out" },
- { 0x037300, "CD control error" },
- { 0x037302, "Power calibration area is full" },
- { 0x037303, "Power calibration area error" },
- { 0x037304, "Program memory area / RMA update failure" },
- { 0x037305, "Program memory area / RMA is full" },
- { 0x037306, "Program memory area / RMA is (almost) full" },
- { 0x040200, "No seek complete" },
- { 0x040300, "Write fault" },
- { 0x040900, "Track following error" },
- { 0x040901, "Tracking servo failure" },
- { 0x040902, "Focus servo failure" },
- { 0x040903, "Spindle servo failure" },
- { 0x041500, "Random positioning error" },
- { 0x041501, "Mechanical positioning or changer error" },
- { 0x041502, "Positioning error detected by read of medium" },
- { 0x043c00, "Mechanical positioning or changer error" },
- { 0x044000, "Diagnostic failure on component (ASCQ)" },
- { 0x044400, "Internal CD/DVD logical unit failure" },
- { 0x04b600, "Media load mechanism failed" },
- { 0x051a00, "Parameter list length error" },
- { 0x052000, "Invalid command operation code" },
- { 0x052100, "Logical block address out of range" },
- { 0x052102, "Invalid address for write" },
- { 0x052400, "Invalid field in command packet" },
- { 0x052600, "Invalid field in parameter list" },
- { 0x052601, "Parameter not supported" },
- { 0x052602, "Parameter value invalid" },
- { 0x052700, "Write protected media" },
- { 0x052c00, "Command sequence error" },
- { 0x052c03, "Current program area is not empty" },
- { 0x052c04, "Current program area is empty" },
- { 0x053001, "Cannot read medium - unknown format" },
- { 0x053002, "Cannot read medium - incompatible format" },
- { 0x053900, "Saving parameters not supported" },
- { 0x054e00, "Overlapped commands attempted" },
- { 0x055302, "Medium removal prevented" },
- { 0x055500, "System resource failure" },
- { 0x056300, "End of user area encountered on this track" },
- { 0x056400, "Illegal mode for this track or incompatible medium" },
- { 0x056f00, "Copy protection key exchange failure"
- " - Authentication failure" },
- { 0x056f01, "Copy protection key exchange failure - Key not present" },
- { 0x056f02, "Copy protection key exchange failure"
- " - Key not established" },
- { 0x056f03, "Read of scrambled sector without authentication" },
- { 0x056f04, "Media region code is mismatched to logical unit" },
- { 0x056f05, "Drive region must be permanent"
- " / region reset count error" },
- { 0x057203, "Session fixation error - incomplete track in session" },
- { 0x057204, "Empty or partially written reserved track" },
- { 0x057205, "No more RZONE reservations are allowed" },
- { 0x05bf00, "Loss of streaming" },
- { 0x062800, "Not ready to ready transition, medium may have changed" },
- { 0x062900, "Power on, reset or hardware reset occurred" },
- { 0x062a00, "Parameters changed" },
- { 0x062a01, "Mode parameters changed" },
- { 0x062e00, "Insufficient time for operation" },
- { 0x063f00, "Logical unit operating conditions have changed" },
- { 0x063f01, "Microcode has been changed" },
- { 0x065a00, "Operator request or state change input (unspecified)" },
- { 0x065a01, "Operator medium removal request" },
- { 0x0bb900, "Play operation aborted" },
- /* Here we use 0xff for the key (not a valid key) to signify
- * that these can have _any_ key value associated with them... */
- { 0xff0401, "Logical unit is in process of becoming ready" },
- { 0xff0400, "Logical unit not ready, cause not reportable" },
- { 0xff0402, "Logical unit not ready, initializing command required" },
- { 0xff0403, "Logical unit not ready, manual intervention required" },
- { 0xff0500, "Logical unit does not respond to selection" },
- { 0xff0800, "Logical unit communication failure" },
- { 0xff0802, "Logical unit communication parity error" },
- { 0xff0801, "Logical unit communication time-out" },
- { 0xff2500, "Logical unit not supported" },
- { 0xff4c00, "Logical unit failed self-configuration" },
- { 0xff3e00, "Logical unit has not self-configured yet" },
-};
-
-void ide_cd_log_error(const char *name, struct request *failed_command,
- struct request_sense *sense)
-{
- int i;
- const char *s = "bad sense key!";
- char buf[80];
-
- printk(KERN_ERR "ATAPI device %s:\n", name);
- if (sense->error_code == 0x70)
- printk(KERN_CONT " Error: ");
- else if (sense->error_code == 0x71)
- printk(" Deferred Error: ");
- else if (sense->error_code == 0x7f)
- printk(KERN_CONT " Vendor-specific Error: ");
- else
- printk(KERN_CONT " Unknown Error Type: ");
-
- if (sense->sense_key < ARRAY_SIZE(sense_key_texts))
- s = sense_key_texts[sense->sense_key];
-
- printk(KERN_CONT "%s -- (Sense key=0x%02x)\n", s, sense->sense_key);
-
- if (sense->asc == 0x40) {
- sprintf(buf, "Diagnostic failure on component 0x%02x",
- sense->ascq);
- s = buf;
- } else {
- int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts);
- unsigned long key = (sense->sense_key << 16);
-
- key |= (sense->asc << 8);
- if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd))
- key |= sense->ascq;
- s = NULL;
-
- while (hi > lo) {
- mid = (lo + hi) / 2;
- if (sense_data_texts[mid].asc_ascq == key ||
- sense_data_texts[mid].asc_ascq == (0xff0000|key)) {
- s = sense_data_texts[mid].text;
- break;
- } else if (sense_data_texts[mid].asc_ascq > key)
- hi = mid;
- else
- lo = mid + 1;
- }
- }
-
- if (s == NULL) {
- if (sense->asc > 0x80)
- s = "(vendor-specific error)";
- else
- s = "(reserved error code)";
- }
-
- printk(KERN_ERR " %s -- (asc=0x%02x, ascq=0x%02x)\n",
- s, sense->asc, sense->ascq);
-
- if (failed_command != NULL) {
- int lo = 0, mid, hi = ARRAY_SIZE(packet_command_texts);
- s = NULL;
-
- while (hi > lo) {
- mid = (lo + hi) / 2;
- if (packet_command_texts[mid].packet_command ==
- scsi_req(failed_command)->cmd[0]) {
- s = packet_command_texts[mid].text;
- break;
- }
- if (packet_command_texts[mid].packet_command >
- scsi_req(failed_command)->cmd[0])
- hi = mid;
- else
- lo = mid + 1;
- }
-
- printk(KERN_ERR " The failed \"%s\" packet command "
- "was: \n \"", s);
- for (i = 0; i < BLK_MAX_CDB; i++)
- printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
- printk(KERN_CONT "\"\n");
- }
-
- /* The SKSV bit specifies validity of the sense_key_specific
- * in the next two commands. It is bit 7 of the first byte.
- * In the case of NOT_READY, if SKSV is set the drive can
- * give us nice ETA readings.
- */
- if (sense->sense_key == NOT_READY && (sense->sks[0] & 0x80)) {
- int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100;
-
- printk(KERN_ERR " Command is %02d%% complete\n",
- progress / 0xffff);
- }
-
- if (sense->sense_key == ILLEGAL_REQUEST &&
- (sense->sks[0] & 0x80) != 0) {
- printk(KERN_ERR " Error in %s byte %d",
- (sense->sks[0] & 0x40) != 0 ?
- "command packet" : "command data",
- (sense->sks[1] << 8) + sense->sks[2]);
-
- if ((sense->sks[0] & 0x40) != 0)
- printk(KERN_CONT " bit %d", sense->sks[0] & 0x07);
-
- printk(KERN_CONT "\n");
- }
-}
-#endif
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
deleted file mode 100644
index f1e922e2479a..000000000000
--- a/drivers/ide/ide-cs.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*======================================================================
-
- A driver for PCMCIA IDE/ATA disk cards
-
- The contents of this file are subject to the Mozilla Public
- License Version 1.1 (the "License"); you may not use this file
- except in compliance with the License. You may obtain a copy of
- the License at http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS
- IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- implied. See the License for the specific language governing
- rights and limitations under the License.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- Alternatively, the contents of this file may be used under the
- terms of the GNU General Public License version 2 (the "GPL"), in
- which case the provisions of the GPL are applicable instead of the
- above. If you wish to allow the use of your version of this file
- only under the terms of the GPL and not to allow others to use
- your version of this file under the MPL, indicate your decision
- by deleting the provisions above and replace them with the notice
- and other provisions required by the GPL. If you do not delete
- the provisions above, a recipient may use your version of this
- file under either the MPL or the GPL.
-
-======================================================================*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/ide.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ciscode.h>
-
-#define DRV_NAME "ide-cs"
-
-/*====================================================================*/
-
-/* Module parameters */
-
-MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver");
-MODULE_LICENSE("Dual MPL/GPL");
-
-/*====================================================================*/
-
-typedef struct ide_info_t {
- struct pcmcia_device *p_dev;
- struct ide_host *host;
- int ndev;
-} ide_info_t;
-
-static void ide_release(struct pcmcia_device *);
-static int ide_config(struct pcmcia_device *);
-
-static void ide_detach(struct pcmcia_device *p_dev);
-
-static int ide_probe(struct pcmcia_device *link)
-{
- ide_info_t *info;
-
- dev_dbg(&link->dev, "ide_attach()\n");
-
- /* Create new ide device */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->p_dev = link;
- link->priv = info;
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO |
- CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
-
- return ide_config(link);
-} /* ide_attach */
-
-static void ide_detach(struct pcmcia_device *link)
-{
- ide_info_t *info = link->priv;
-
- dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
-
- ide_release(link);
-
- kfree(info);
-} /* ide_detach */
-
-static const struct ide_port_ops idecs_port_ops = {
- .quirkproc = ide_undecoded_slave,
-};
-
-static const struct ide_port_info idecs_port_info = {
- .port_ops = &idecs_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_pci,
-};
-
-static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
- unsigned long irq, struct pcmcia_device *handle)
-{
- struct ide_host *host;
- ide_hwif_t *hwif;
- int i, rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- if (!request_region(io, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, io, io + 7);
- return NULL;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(io, 8);
- return NULL;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, io, ctl);
- hw.irq = irq;
- hw.dev = &handle->dev;
-
- rc = ide_host_add(&idecs_port_info, hws, 1, &host);
- if (rc)
- goto out_release;
-
- hwif = host->ports[0];
-
- if (hwif->present)
- return host;
-
- /* retry registration in case device is still spinning up */
- for (i = 0; i < 10; i++) {
- msleep(100);
- ide_port_scan(hwif);
- if (hwif->present)
- return host;
- }
-
- return host;
-
-out_release:
- release_region(ctl, 1);
- release_region(io, 8);
- return NULL;
-}
-
-static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
-{
- int *is_kme = priv_data;
-
- if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
- != IO_DATA_PATH_WIDTH_8) {
- pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
- }
- pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
- pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
-
- if (pdev->resource[1]->end) {
- pdev->resource[0]->end = 8;
- pdev->resource[1]->end = (*is_kme) ? 2 : 1;
- } else {
- if (pdev->resource[0]->end < 16)
- return -ENODEV;
- }
-
- return pcmcia_request_io(pdev);
-}
-
-static int ide_config(struct pcmcia_device *link)
-{
- ide_info_t *info = link->priv;
- int ret = 0, is_kme = 0;
- unsigned long io_base, ctl_base;
- struct ide_host *host;
-
- dev_dbg(&link->dev, "ide_config(0x%p)\n", link);
-
- is_kme = ((link->manf_id == MANFID_KME) &&
- ((link->card_id == PRODID_KME_KXLC005_A) ||
- (link->card_id == PRODID_KME_KXLC005_B)));
-
- if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) {
- link->config_flags &= ~CONF_AUTO_CHECK_VCC;
- if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme))
- goto failed; /* No suitable config found */
- }
- io_base = link->resource[0]->start;
- if (link->resource[1]->end)
- ctl_base = link->resource[1]->start;
- else
- ctl_base = link->resource[0]->start + 0x0e;
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- /* disable drive interrupts during IDE probe */
- outb(0x02, ctl_base);
-
- /* special setup for KXLC005 card */
- if (is_kme)
- outb(0x81, ctl_base+1);
-
- host = idecs_register(io_base, ctl_base, link->irq, link);
- if (host == NULL && resource_size(link->resource[0]) == 0x20) {
- outb(0x02, ctl_base + 0x10);
- host = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq, link);
- }
-
- if (host == NULL)
- goto failed;
-
- info->ndev = 1;
- info->host = host;
- dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n",
- 'a' + host->ports[0]->index * 2,
- link->vpp / 10, link->vpp % 10);
-
- return 0;
-
-failed:
- ide_release(link);
- return -ENODEV;
-} /* ide_config */
-
-static void ide_release(struct pcmcia_device *link)
-{
- ide_info_t *info = link->priv;
- struct ide_host *host = info->host;
-
- dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
-
- if (info->ndev) {
- ide_hwif_t *hwif = host->ports[0];
- unsigned long data_addr, ctl_addr;
-
- data_addr = hwif->io_ports.data_addr;
- ctl_addr = hwif->io_ports.ctl_addr;
-
- ide_host_remove(host);
- info->ndev = 0;
-
- release_region(ctl_addr, 1);
- release_region(data_addr, 8);
- }
-
- pcmcia_disable_device(link);
-} /* ide_release */
-
-
-static const struct pcmcia_device_id ide_ids[] = {
- PCMCIA_DEVICE_FUNC_ID(4),
- PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
- PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
- PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
- PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
- PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
- PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
- PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
- PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */
- PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */
- PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
- PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
- PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
- PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
- PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
- PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
- PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
- PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
- PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
- PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
- PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
- PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
- PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
- PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
- PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
- PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
- PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
- PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
- PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
- PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
- PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
- PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
- PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
- PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
- PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
- PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
- PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
- PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
- PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
- PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
- PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
- PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
- PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
- PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
- PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
- PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
- PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
- PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
- PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
- PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
- PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
- PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
- PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
- PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
- PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, ide_ids);
-
-static struct pcmcia_driver ide_cs_driver = {
- .owner = THIS_MODULE,
- .name = "ide-cs",
- .probe = ide_probe,
- .remove = ide_detach,
- .id_table = ide_ids,
-};
-
-static int __init init_ide_cs(void)
-{
- return pcmcia_register_driver(&ide_cs_driver);
-}
-
-static void __exit exit_ide_cs(void)
-{
- pcmcia_unregister_driver(&ide_cs_driver);
-}
-
-late_initcall(init_ide_cs);
-module_exit(exit_ide_cs);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
deleted file mode 100644
index ca1d4b3d3878..000000000000
--- a/drivers/ide/ide-devsets.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-
-DEFINE_MUTEX(ide_setting_mtx);
-
-ide_devset_get(io_32bit, io_32bit);
-
-static int set_io_32bit(ide_drive_t *drive, int arg)
-{
- if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
- return -EPERM;
-
- if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
- return -EINVAL;
-
- drive->io_32bit = arg;
-
- return 0;
-}
-
-ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);
-
-static int set_ksettings(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
- else
- drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;
-
- return 0;
-}
-
-ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);
-
-static int set_using_dma(ide_drive_t *drive, int arg)
-{
-#ifdef CONFIG_BLK_DEV_IDEDMA
- int err = -EPERM;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (ata_id_has_dma(drive->id) == 0)
- goto out;
-
- if (drive->hwif->dma_ops == NULL)
- goto out;
-
- err = 0;
-
- if (arg) {
- if (ide_set_dma(drive))
- err = -EIO;
- } else
- ide_dma_off(drive);
-
-out:
- return err;
-#else
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- return -EPERM;
-#endif
-}
-
-/*
- * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
- */
-static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
-{
- switch (req_pio) {
- case 202:
- case 201:
- case 200:
- case 102:
- case 101:
- case 100:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
- case 9:
- case 8:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
- case 7:
- case 6:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
- default:
- return 0;
- }
-}
-
-static int set_pio_mode(ide_drive_t *drive, int arg)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (arg < 0 || arg > 255)
- return -EINVAL;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return -ENOSYS;
-
- if (set_pio_mode_abuse(drive->hwif, arg)) {
- drive->pio_mode = arg + XFER_PIO_0;
-
- if (arg == 8 || arg == 9) {
- unsigned long flags;
-
- /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
- spin_lock_irqsave(&hwif->lock, flags);
- port_ops->set_pio_mode(hwif, drive);
- spin_unlock_irqrestore(&hwif->lock, flags);
- } else
- port_ops->set_pio_mode(hwif, drive);
- } else {
- int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- ide_set_pio(drive, arg);
-
- if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
- if (keep_dma)
- ide_dma_on(drive);
- }
- }
-
- return 0;
-}
-
-ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);
-
-static int set_unmaskirq(ide_drive_t *drive, int arg)
-{
- if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
- return -EPERM;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- else
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
-
- return 0;
-}
-
-ide_ext_devset_rw_sync(io_32bit, io_32bit);
-ide_ext_devset_rw_sync(keepsettings, ksettings);
-ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
-ide_ext_devset_rw_sync(using_dma, using_dma);
-__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
-
-int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
- int arg)
-{
- struct request_queue *q = drive->queue;
- struct request *rq;
- int ret = 0;
-
- if (!(setting->flags & DS_SYNC))
- return setting->set(drive, arg);
-
- rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- scsi_req(rq)->cmd_len = 5;
- scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
- *(int *)&scsi_req(rq)->cmd[1] = arg;
- ide_req(rq)->special = setting->set;
-
- blk_execute_rq(NULL, rq, 0);
- ret = scsi_req(rq)->result;
- blk_put_request(rq);
-
- return ret;
-}
-
-ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
-{
- int err, (*setfunc)(ide_drive_t *, int) = ide_req(rq)->special;
-
- err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
- if (err)
- scsi_req(rq)->result = err;
- ide_complete_rq(drive, 0, blk_rq_bytes(rq));
- return ide_stopped;
-}
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
deleted file mode 100644
index 8413731c6259..000000000000
--- a/drivers/ide/ide-disk.c
+++ /dev/null
@@ -1,795 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyright (C) 1998-2002 Linux ATA Development
- * Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
- */
-
-/*
- * Mostly written by Mark Lord <mlord@pobox.com>
- * and Gadi Oxman <gadio@netvision.net.il>
- * and Andre Hedrick <andre@linux-ide.org>
- *
- * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/leds.h>
-#include <linux/ide.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/div64.h>
-
-#include "ide-disk.h"
-
-static const u8 ide_rw_cmds[] = {
- ATA_CMD_READ_MULTI,
- ATA_CMD_WRITE_MULTI,
- ATA_CMD_READ_MULTI_EXT,
- ATA_CMD_WRITE_MULTI_EXT,
- ATA_CMD_PIO_READ,
- ATA_CMD_PIO_WRITE,
- ATA_CMD_PIO_READ_EXT,
- ATA_CMD_PIO_WRITE_EXT,
- ATA_CMD_READ,
- ATA_CMD_WRITE,
- ATA_CMD_READ_EXT,
- ATA_CMD_WRITE_EXT,
-};
-
-static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
-{
- u8 index, lba48, write;
-
- lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
- write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
-
- if (dma) {
- cmd->protocol = ATA_PROT_DMA;
- index = 8;
- } else {
- cmd->protocol = ATA_PROT_PIO;
- if (drive->mult_count) {
- cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
- index = 0;
- } else
- index = 4;
- }
-
- cmd->tf.command = ide_rw_cmds[index + lba48 + write];
-}
-
-/*
- * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
- * using LBA if supported, or CHS otherwise, to address sectors.
- */
-static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
- sector_t block)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 nsectors = (u16)blk_rq_sectors(rq);
- u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
- u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- ide_startstop_t rc;
-
- if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
- if (block + blk_rq_sectors(rq) > 1ULL << 28)
- dma = 0;
- else
- lba48 = 0;
- }
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- if (drive->dev_flags & IDE_DFLAG_LBA) {
- if (lba48) {
- pr_debug("%s: LBA=0x%012llx\n", drive->name,
- (unsigned long long)block);
-
- tf->nsect = nsectors & 0xff;
- tf->lbal = (u8) block;
- tf->lbam = (u8)(block >> 8);
- tf->lbah = (u8)(block >> 16);
- tf->device = ATA_LBA;
-
- tf = &cmd.hob;
- tf->nsect = (nsectors >> 8) & 0xff;
- tf->lbal = (u8)(block >> 24);
- if (sizeof(block) != 4) {
- tf->lbam = (u8)((u64)block >> 32);
- tf->lbah = (u8)((u64)block >> 40);
- }
-
- cmd.valid.out.hob = IDE_VALID_OUT_HOB;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- cmd.tf_flags |= IDE_TFLAG_LBA48;
- } else {
- tf->nsect = nsectors & 0xff;
- tf->lbal = block;
- tf->lbam = block >>= 8;
- tf->lbah = block >>= 8;
- tf->device = ((block >> 8) & 0xf) | ATA_LBA;
- }
- } else {
- unsigned int sect, head, cyl, track;
-
- track = (int)block / drive->sect;
- sect = (int)block % drive->sect + 1;
- head = track % drive->head;
- cyl = track / drive->head;
-
- pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
-
- tf->nsect = nsectors & 0xff;
- tf->lbal = sect;
- tf->lbam = cyl;
- tf->lbah = cyl >> 8;
- tf->device = head;
- }
-
- cmd.tf_flags |= IDE_TFLAG_FS;
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- ide_tf_set_cmd(drive, &cmd, dma);
- cmd.rq = rq;
-
- if (dma == 0) {
- ide_init_sg_cmd(&cmd, nsectors << 9);
- ide_map_sg(drive, &cmd);
- }
-
- rc = do_rw_taskfile(drive, &cmd);
-
- if (rc == ide_stopped && dma) {
- /* fallback to PIO */
- cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
- ide_tf_set_cmd(drive, &cmd, 0);
- ide_init_sg_cmd(&cmd, nsectors << 9);
- rc = do_rw_taskfile(drive, &cmd);
- }
-
- return rc;
-}
-
-/*
- * 268435455 == 137439 MB or 28bit limit
- * 320173056 == 163929 MB or 48bit addressing
- * 1073741822 == 549756 MB or 48bit addressing fake drive
- */
-
-static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
- sector_t block)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
- BUG_ON(blk_rq_is_passthrough(rq));
-
- ledtrig_disk_activity(rq_data_dir(rq) == WRITE);
-
- pr_debug("%s: %sing: block=%llu, sectors=%u\n",
- drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
- (unsigned long long)block, blk_rq_sectors(rq));
-
- if (hwif->rw_disk)
- hwif->rw_disk(drive, rq);
-
- return __ide_do_rw_disk(drive, rq, block);
-}
-
-/*
- * Queries for true maximum capacity of the drive.
- * Returns maximum LBA address (> 0) of the drive, 0 if failed.
- */
-static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- u64 addr = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- if (lba48)
- tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
- else
- tf->command = ATA_CMD_READ_NATIVE_MAX;
- tf->device = ATA_LBA;
-
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- if (lba48) {
- cmd.valid.out.hob = IDE_VALID_OUT_HOB;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- cmd.tf_flags = IDE_TFLAG_LBA48;
- }
-
- ide_no_data_taskfile(drive, &cmd);
-
- /* if OK, compute maximum address value */
- if (!(tf->status & ATA_ERR))
- addr = ide_get_lba_addr(&cmd, lba48) + 1;
-
- return addr;
-}
-
-/*
- * Sets maximum virtual LBA address of the drive.
- * Returns new maximum virtual LBA address (> 0) or 0 on failure.
- */
-static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- u64 addr_set = 0;
-
- addr_req--;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->lbal = (addr_req >> 0) & 0xff;
- tf->lbam = (addr_req >>= 8) & 0xff;
- tf->lbah = (addr_req >>= 8) & 0xff;
- if (lba48) {
- cmd.hob.lbal = (addr_req >>= 8) & 0xff;
- cmd.hob.lbam = (addr_req >>= 8) & 0xff;
- cmd.hob.lbah = (addr_req >>= 8) & 0xff;
- tf->command = ATA_CMD_SET_MAX_EXT;
- } else {
- tf->device = (addr_req >>= 8) & 0x0f;
- tf->command = ATA_CMD_SET_MAX;
- }
- tf->device |= ATA_LBA;
-
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- if (lba48) {
- cmd.valid.out.hob = IDE_VALID_OUT_HOB;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- cmd.tf_flags = IDE_TFLAG_LBA48;
- }
-
- ide_no_data_taskfile(drive, &cmd);
-
- /* if OK, compute maximum address value */
- if (!(tf->status & ATA_ERR))
- addr_set = ide_get_lba_addr(&cmd, lba48) + 1;
-
- return addr_set;
-}
-
-static unsigned long long sectors_to_MB(unsigned long long n)
-{
- n <<= 9; /* make it bytes */
- do_div(n, 1000000); /* make it MB */
- return n;
-}
-
-/*
- * Some disks report total number of sectors instead of
- * maximum sector address. We list them here.
- */
-static const struct drive_list_entry hpa_list[] = {
- { "ST340823A", NULL },
- { "ST320413A", NULL },
- { "ST310211A", NULL },
- { NULL, NULL }
-};
-
-static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
-{
- u64 capacity, set_max;
-
- capacity = drive->capacity64;
- set_max = idedisk_read_native_max_address(drive, lba48);
-
- if (ide_in_drive_list(drive->id, hpa_list)) {
- /*
- * Since we are inclusive wrt firmware revisions, do this
- * extra check and apply the workaround only when needed.
- */
- if (set_max == capacity + 1)
- set_max--;
- }
-
- return set_max;
-}
-
-static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
-{
- set_max = idedisk_set_max_address(drive, set_max, lba48);
- if (set_max)
- drive->capacity64 = set_max;
-
- return set_max;
-}
-
-static void idedisk_check_hpa(ide_drive_t *drive)
-{
- u64 capacity, set_max;
- int lba48 = ata_id_lba48_enabled(drive->id);
-
- capacity = drive->capacity64;
- set_max = ide_disk_hpa_get_native_capacity(drive, lba48);
-
- if (set_max <= capacity)
- return;
-
- drive->probed_capacity = set_max;
-
- printk(KERN_INFO "%s: Host Protected Area detected.\n"
- "\tcurrent capacity is %llu sectors (%llu MB)\n"
- "\tnative capacity is %llu sectors (%llu MB)\n",
- drive->name,
- capacity, sectors_to_MB(capacity),
- set_max, sectors_to_MB(set_max));
-
- if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
- return;
-
- set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
- if (set_max)
- printk(KERN_INFO "%s: Host Protected Area disabled.\n",
- drive->name);
-}
-
-static int ide_disk_get_capacity(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- int lba;
-
- if (ata_id_lba48_enabled(id)) {
- /* drive speaks 48-bit LBA */
- lba = 1;
- drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
- } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
- /* drive speaks 28-bit LBA */
- lba = 1;
- drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- } else {
- /* drive speaks boring old 28-bit CHS */
- lba = 0;
- drive->capacity64 = drive->cyl * drive->head * drive->sect;
- }
-
- drive->probed_capacity = drive->capacity64;
-
- if (lba) {
- drive->dev_flags |= IDE_DFLAG_LBA;
-
- /*
- * If this device supports the Host Protected Area feature set,
- * then we may need to change our opinion about its capacity.
- */
- if (ata_id_hpa_enabled(id))
- idedisk_check_hpa(drive);
- }
-
- /* limit drive capacity to 137GB if LBA48 cannot be used */
- if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
- drive->capacity64 > 1ULL << 28) {
- printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
- "%llu sectors (%llu MB)\n",
- drive->name, (unsigned long long)drive->capacity64,
- sectors_to_MB(drive->capacity64));
- drive->probed_capacity = drive->capacity64 = 1ULL << 28;
- }
-
- if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
- (drive->dev_flags & IDE_DFLAG_LBA48)) {
- if (drive->capacity64 > 1ULL << 28) {
- printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
- " will be used for accessing sectors "
- "> %u\n", drive->name, 1 << 28);
- } else
- drive->dev_flags &= ~IDE_DFLAG_LBA48;
- }
-
- return 0;
-}
-
-static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- int lba48 = ata_id_lba48_enabled(id);
-
- if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
- ata_id_hpa_enabled(id) == 0)
- return;
-
- /*
- * according to the spec the SET MAX ADDRESS command shall be
- * immediately preceded by a READ NATIVE MAX ADDRESS command
- */
- if (!ide_disk_hpa_get_native_capacity(drive, lba48))
- return;
-
- if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
- drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
-}
-
-static bool idedisk_prep_rq(ide_drive_t *drive, struct request *rq)
-{
- struct ide_cmd *cmd;
-
- if (req_op(rq) != REQ_OP_FLUSH)
- return true;
-
- if (ide_req(rq)->special) {
- cmd = ide_req(rq)->special;
- memset(cmd, 0, sizeof(*cmd));
- } else {
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- }
-
- /* FIXME: map struct ide_taskfile on rq->cmd[] */
- BUG_ON(cmd == NULL);
-
- if (ata_id_flush_ext_enabled(drive->id) &&
- (drive->capacity64 >= (1UL << 28)))
- cmd->tf.command = ATA_CMD_FLUSH_EXT;
- else
- cmd->tf.command = ATA_CMD_FLUSH;
- cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd->tf_flags = IDE_TFLAG_DYN;
- cmd->protocol = ATA_PROT_NODATA;
- rq->cmd_flags &= ~REQ_OP_MASK;
- rq->cmd_flags |= REQ_OP_DRV_OUT;
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
- ide_req(rq)->special = cmd;
- cmd->rq = rq;
-
- return true;
-}
-
-ide_devset_get(multcount, mult_count);
-
-/*
- * This is tightly woven into the driver->do_special path and must not be touched.
- * DON'T do it again until a total personality rewrite is committed.
- */
-static int set_multcount(ide_drive_t *drive, int arg)
-{
- struct request *rq;
-
- if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
- return -EINVAL;
-
- if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
- return -EBUSY;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
-
- drive->mult_req = arg;
- drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- blk_execute_rq(NULL, rq, 0);
- blk_put_request(rq);
-
- return (drive->mult_count == arg) ? 0 : -EIO;
-}
-
-ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
-
-static int set_nowerr(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_NOWERR;
- else
- drive->dev_flags &= ~IDE_DFLAG_NOWERR;
-
- drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
-
- return 0;
-}
-
-static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
-{
- struct ide_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.feature = feature;
- cmd.tf.nsect = nsect;
- cmd.tf.command = ATA_CMD_SET_FEATURES;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-static void update_flush(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- bool wc = false;
-
- if (drive->dev_flags & IDE_DFLAG_WCACHE) {
- unsigned long long capacity;
- int barrier;
- /*
- * We must avoid issuing commands a drive does not
- * understand or we may crash it. We check flush cache
- * is supported. We also check we have the LBA48 flush
- * cache if the drive capacity is too large. By this
- * time we have trimmed the drive capacity if LBA48 is
- * not available so we don't need to recheck that.
- */
- capacity = ide_gd_capacity(drive);
- barrier = ata_id_flush_enabled(id) &&
- (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
- ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
- capacity <= (1ULL << 28) ||
- ata_id_flush_ext_enabled(id));
-
- printk(KERN_INFO "%s: cache flushes %ssupported\n",
- drive->name, barrier ? "" : "not ");
-
- if (barrier) {
- wc = true;
- drive->prep_rq = idedisk_prep_rq;
- }
- }
-
- blk_queue_write_cache(drive->queue, wc, false);
-}
-
-ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
-
-static int set_wcache(ide_drive_t *drive, int arg)
-{
- int err = 1;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (ata_id_flush_enabled(drive->id)) {
- err = ide_do_setfeature(drive,
- arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
- if (err == 0) {
- if (arg)
- drive->dev_flags |= IDE_DFLAG_WCACHE;
- else
- drive->dev_flags &= ~IDE_DFLAG_WCACHE;
- }
- }
-
- update_flush(drive);
-
- return err;
-}
-
-static int do_idedisk_flushcache(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- if (ata_id_flush_ext_enabled(drive->id))
- cmd.tf.command = ATA_CMD_FLUSH_EXT;
- else
- cmd.tf.command = ATA_CMD_FLUSH;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-ide_devset_get(acoustic, acoustic);
-
-static int set_acoustic(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 254)
- return -EINVAL;
-
- ide_do_setfeature(drive,
- arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);
-
- drive->acoustic = arg;
-
- return 0;
-}
-
-ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);
-
-/*
- * drive->addressing:
- * 0: 28-bit
- * 1: 48-bit
- * 2: 48-bit capable doing 28-bit
- */
-static int set_addressing(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 2)
- return -EINVAL;
-
- if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
- ata_id_lba48_enabled(drive->id) == 0))
- return -EIO;
-
- if (arg == 2)
- arg = 0;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_LBA48;
- else
- drive->dev_flags &= ~IDE_DFLAG_LBA48;
-
- return 0;
-}
-
-ide_ext_devset_rw(acoustic, acoustic);
-ide_ext_devset_rw(address, addressing);
-ide_ext_devset_rw(multcount, multcount);
-ide_ext_devset_rw(wcache, wcache);
-
-ide_ext_devset_rw_sync(nowerr, nowerr);
-
-static int ide_disk_check(ide_drive_t *drive, const char *s)
-{
- return 1;
-}
-
-static void ide_disk_setup(ide_drive_t *drive)
-{
- struct ide_disk_obj *idkp = drive->driver_data;
- struct request_queue *q = drive->queue;
- ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
- char *m = (char *)&id[ATA_ID_PROD];
- unsigned long long capacity;
-
- ide_proc_register_driver(drive, idkp->driver);
-
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
- return;
-
- if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
- /*
-		 * Removable disks (e.g. SyQuest); ignore 'WD' drives
- */
- if (m[0] != 'W' || m[1] != 'D')
- drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
- }
-
- (void)set_addressing(drive, 1);
-
- if (drive->dev_flags & IDE_DFLAG_LBA48) {
- int max_s = 2048;
-
- if (max_s > hwif->rqsize)
- max_s = hwif->rqsize;
-
- blk_queue_max_hw_sectors(q, max_s);
- }
-
- printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
- queue_max_sectors(q) / 2);
-
- if (ata_id_is_ssd(id)) {
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
- }
-
- /* calculate drive capacity, and select LBA if possible */
- ide_disk_get_capacity(drive);
-
- /*
- * if possible, give fdisk access to more of the drive,
- * by correcting bios_cyls:
- */
- capacity = ide_gd_capacity(drive);
-
- if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
- if (ata_id_lba48_enabled(drive->id)) {
- /* compatibility */
- drive->bios_sect = 63;
- drive->bios_head = 255;
- }
-
- if (drive->bios_sect && drive->bios_head) {
- unsigned int cap0 = capacity; /* truncate to 32 bits */
- unsigned int cylsz, cyl;
-
- if (cap0 != capacity)
- drive->bios_cyl = 65535;
- else {
- cylsz = drive->bios_sect * drive->bios_head;
- cyl = cap0 / cylsz;
- if (cyl > 65535)
- cyl = 65535;
- if (cyl > drive->bios_cyl)
- drive->bios_cyl = cyl;
- }
- }
- }
- printk(KERN_INFO "%s: %llu sectors (%llu MB)",
- drive->name, capacity, sectors_to_MB(capacity));
-
- /* Only print cache size when it was specified */
- if (id[ATA_ID_BUF_SIZE])
- printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);
-
- printk(KERN_CONT ", CHS=%d/%d/%d\n",
- drive->bios_cyl, drive->bios_head, drive->bios_sect);
-
- /* write cache enabled? */
- if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
- drive->dev_flags |= IDE_DFLAG_WCACHE;
-
- set_wcache(drive, 1);
-
- if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
- (drive->head == 0 || drive->head > 16))
- printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
- drive->name, drive->head);
-}
-
-static void ide_disk_flush(ide_drive_t *drive)
-{
- if (ata_id_flush_enabled(drive->id) == 0 ||
- (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
- return;
-
- if (do_idedisk_flushcache(drive))
- printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
-}
-
-static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
-{
- return 0;
-}
-
-static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
- int on)
-{
- struct ide_cmd cmd;
- int ret;
-
- if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
- return 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- ret = ide_no_data_taskfile(drive, &cmd);
-
- if (ret)
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
-
- return ret;
-}
-
-const struct ide_disk_ops ide_ata_disk_ops = {
- .check = ide_disk_check,
- .unlock_native_capacity = ide_disk_unlock_native_capacity,
- .get_capacity = ide_disk_get_capacity,
- .setup = ide_disk_setup,
- .flush = ide_disk_flush,
- .init_media = ide_disk_init_media,
- .set_doorlock = ide_disk_set_doorlock,
- .do_request = ide_do_rw_disk,
- .ioctl = ide_disk_ioctl,
- .compat_ioctl = ide_disk_ioctl,
-};
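
The ide_devset_get(), ide_devset_get_flag() and ide_ext_devset_rw() invocations in ide-disk.c above generate the small per-setting accessors that pair with the explicit set_*() functions in the file. Roughly, they expand to trivial helpers along the following lines (a sketch based on the ide_devset_* macros in include/linux/ide.h, not a verbatim copy of that header):

/*
 * Approximate expansions, as they would appear inside ide-disk.c
 * (which already includes <linux/ide.h> for ide_drive_t).
 */
static int get_multcount(ide_drive_t *drive)	/* ide_devset_get(multcount, mult_count) */
{
	return drive->mult_count;
}

static int get_wcache(ide_drive_t *drive)	/* ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE) */
{
	return !!(drive->dev_flags & IDE_DFLAG_WCACHE);
}
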
diff --git a/drivers/ide/ide-disk.h b/drivers/ide/ide-disk.h
deleted file mode 100644
index 0e8cc18bfda6..000000000000
--- a/drivers/ide/ide-disk.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IDE_DISK_H
-#define __IDE_DISK_H
-
-#include "ide-gd.h"
-
-#ifdef CONFIG_IDE_GD_ATA
-/* ide-disk.c */
-extern const struct ide_disk_ops ide_ata_disk_ops;
-ide_decl_devset(address);
-ide_decl_devset(multcount);
-ide_decl_devset(nowerr);
-ide_decl_devset(wcache);
-ide_decl_devset(acoustic);
-
-/* ide-disk_ioctl.c */
-int ide_disk_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int,
- unsigned long);
-
-#ifdef CONFIG_IDE_PROC_FS
-/* ide-disk_proc.c */
-extern ide_proc_entry_t ide_disk_proc[];
-extern const struct ide_proc_devset ide_disk_settings[];
-#endif
-#else
-#define ide_disk_proc NULL
-#define ide_disk_settings NULL
-#endif
-
-#endif /* __IDE_DISK_H */
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
deleted file mode 100644
index 2c45616cff4f..000000000000
--- a/drivers/ide/ide-disk_ioctl.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/mutex.h>
-
-#include "ide-disk.h"
-
-static DEFINE_MUTEX(ide_disk_ioctl_mutex);
-static const struct ide_ioctl_devset ide_disk_ioctl_settings[] = {
-{ HDIO_GET_ADDRESS, HDIO_SET_ADDRESS, &ide_devset_address },
-{ HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, &ide_devset_multcount },
-{ HDIO_GET_NOWERR, HDIO_SET_NOWERR, &ide_devset_nowerr },
-{ HDIO_GET_WCACHE, HDIO_SET_WCACHE, &ide_devset_wcache },
-{ HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, &ide_devset_acoustic },
-{ 0 }
-};
-
-int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int err;
-
- mutex_lock(&ide_disk_ioctl_mutex);
- err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
- if (err != -EOPNOTSUPP)
- goto out;
-
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
-out:
- mutex_unlock(&ide_disk_ioctl_mutex);
- return err;
-}
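
The HDIO_GET_*/HDIO_SET_* pairs wired up in ide_disk_ioctl_settings above are the long-standing <linux/hdreg.h> ioctls used by tools such as hdparm. A minimal user-space sketch of driving the write-cache setting through this path might look as follows; the device node and the lack of error handling are assumptions for illustration only:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	long wc = 0;
	int fd = open("/dev/hda", O_RDONLY);	/* hypothetical IDE disk node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, HDIO_GET_WCACHE, &wc) == 0)	/* routed via ide_devset_wcache */
		printf("write cache currently %s\n", wc ? "on" : "off");
	ioctl(fd, HDIO_SET_WCACHE, 1UL);	/* value is passed directly, not by pointer */
	close(fd);
	return 0;
}
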
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
deleted file mode 100644
index 95d239b2f646..000000000000
--- a/drivers/ide/ide-disk_proc.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/seq_file.h>
-
-#include "ide-disk.h"
-
-static int smart_enable(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->feature = ATA_SMART_ENABLE;
- tf->lbam = ATA_SMART_LBAM_PASS;
- tf->lbah = ATA_SMART_LBAH_PASS;
- tf->command = ATA_CMD_SMART;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->feature = sub_cmd;
- tf->nsect = 0x01;
- tf->lbam = ATA_SMART_LBAM_PASS;
- tf->lbah = ATA_SMART_LBAH_PASS;
- tf->command = ATA_CMD_SMART;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.protocol = ATA_PROT_PIO;
-
- return ide_raw_taskfile(drive, &cmd, buf, 1);
-}
-
-static int idedisk_cache_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
-
- if (drive->dev_flags & IDE_DFLAG_ID_READ)
- seq_printf(m, "%i\n", drive->id[ATA_ID_BUF_SIZE] / 2);
- else
- seq_printf(m, "(none)\n");
- return 0;
-}
-
-static int idedisk_capacity_proc_show(struct seq_file *m, void *v)
-{
-	ide_drive_t *drive = (ide_drive_t *)m->private;
-
- seq_printf(m, "%llu\n", (long long)ide_gd_capacity(drive));
- return 0;
-}
-
-static int __idedisk_proc_show(struct seq_file *m, ide_drive_t *drive, u8 sub_cmd)
-{
- u8 *buf;
-
- buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- (void)smart_enable(drive);
-
- if (get_smart_data(drive, buf, sub_cmd) == 0) {
- __le16 *val = (__le16 *)buf;
- int i;
-
- for (i = 0; i < SECTOR_SIZE / 2; i++) {
- seq_printf(m, "%04x%c", le16_to_cpu(val[i]),
- (i % 8) == 7 ? '\n' : ' ');
- }
- }
- kfree(buf);
- return 0;
-}
-
-static int idedisk_sv_proc_show(struct seq_file *m, void *v)
-{
- return __idedisk_proc_show(m, m->private, ATA_SMART_READ_VALUES);
-}
-
-static int idedisk_st_proc_show(struct seq_file *m, void *v)
-{
- return __idedisk_proc_show(m, m->private, ATA_SMART_READ_THRESHOLDS);
-}
-
-ide_proc_entry_t ide_disk_proc[] = {
- { "cache", S_IFREG|S_IRUGO, idedisk_cache_proc_show },
- { "capacity", S_IFREG|S_IRUGO, idedisk_capacity_proc_show },
- { "geometry", S_IFREG|S_IRUGO, ide_geometry_proc_show },
- { "smart_values", S_IFREG|S_IRUSR, idedisk_sv_proc_show },
- { "smart_thresholds", S_IFREG|S_IRUSR, idedisk_st_proc_show },
- {}
-};
-
-ide_devset_rw_field(bios_cyl, bios_cyl);
-ide_devset_rw_field(bios_head, bios_head);
-ide_devset_rw_field(bios_sect, bios_sect);
-ide_devset_rw_field(failures, failures);
-ide_devset_rw_field(lun, lun);
-ide_devset_rw_field(max_failures, max_failures);
-
-const struct ide_proc_devset ide_disk_settings[] = {
- IDE_PROC_DEVSET(acoustic, 0, 254),
- IDE_PROC_DEVSET(address, 0, 2),
- IDE_PROC_DEVSET(bios_cyl, 0, 65535),
- IDE_PROC_DEVSET(bios_head, 0, 255),
- IDE_PROC_DEVSET(bios_sect, 0, 63),
- IDE_PROC_DEVSET(failures, 0, 65535),
- IDE_PROC_DEVSET(lun, 0, 7),
- IDE_PROC_DEVSET(max_failures, 0, 65535),
- IDE_PROC_DEVSET(multcount, 0, 16),
- IDE_PROC_DEVSET(nowerr, 0, 1),
- IDE_PROC_DEVSET(wcache, 0, 1),
- { NULL },
-};
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
deleted file mode 100644
index b7c2c0bd18b5..000000000000
--- a/drivers/ide/ide-dma-sff.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-
-/**
- * config_drive_for_dma - attempt to activate IDE DMA
- * @drive: the drive to place in DMA mode
- *
- * If the drive supports at least mode 2 DMA or UDMA of any kind
- * then attempt to place it into DMA mode. Drives that are known to
- * support DMA but predate the DMA properties or that are known
- * to have DMA handling bugs are also set up appropriately based
- * on the good/bad drive lists.
- */
-
-int config_drive_for_dma(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
-
- if (drive->media != ide_disk) {
- if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
- return 0;
- }
-
- /*
- * Enable DMA on any drive that has
- * UltraDMA (mode 0/1/2/3/4/5/6) enabled
- */
- if ((id[ATA_ID_FIELD_VALID] & 4) &&
- ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
- return 1;
-
- /*
- * Enable DMA on any drive that has mode2 DMA
- * (multi or single) enabled
- */
- if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
- (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
- return 1;
-
- /* Consult the list of known "good" drives */
- if (ide_dma_good_drive(drive))
- return 1;
-
- return 0;
-}
-
-u8 ide_dma_sff_read_status(ide_hwif_t *hwif)
-{
- unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)addr);
- else
- return inb(addr);
-}
-EXPORT_SYMBOL_GPL(ide_dma_sff_read_status);
-
-static void ide_dma_sff_write_status(ide_hwif_t *hwif, u8 val)
-{
- unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(val, (void __iomem *)addr);
- else
- outb(val, addr);
-}
-
-/**
- * ide_dma_host_set - Enable/disable DMA on a host
- * @drive: drive to control
- *
- * Enable/disable DMA on an IDE controller following generic
- * bus-mastering IDE controller behaviour.
- */
-
-void ide_dma_host_set(ide_drive_t *drive, int on)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 unit = drive->dn & 1;
- u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- if (on)
- dma_stat |= (1 << (5 + unit));
- else
- dma_stat &= ~(1 << (5 + unit));
-
- ide_dma_sff_write_status(hwif, dma_stat);
-}
-EXPORT_SYMBOL_GPL(ide_dma_host_set);
-
-/**
- * ide_build_dmatable - build IDE DMA table
- *
- * ide_build_dmatable() prepares a dma request. We map the command
- * to get the pci bus addresses of the buffers and then build up
- * the PRD table that the IDE layer wants to be fed.
- *
- * Most chipsets correctly interpret a length of 0x0000 as 64KB,
- * but at least one (e.g. CS5530) misinterprets it as zero (!).
- * So we break the 64KB entry into two 32KB entries instead.
- *
- * Returns the number of built PRD entries if all went okay,
- * returns 0 otherwise.
- *
- * May also be invoked from trm290.c
- */
-
-int ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- __le32 *table = (__le32 *)hwif->dmatable_cpu;
- unsigned int count = 0;
- int i;
- struct scatterlist *sg;
- u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
-
- for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
- u32 cur_addr, cur_len, xcount, bcount;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- /*
- * Fill in the dma table, without crossing any 64kB boundaries.
- * Most hardware requires 16-bit alignment of all blocks,
- * but the trm290 requires 32-bit alignment.
- */
-
- while (cur_len) {
- if (count++ >= PRD_ENTRIES)
- goto use_pio_instead;
-
- bcount = 0x10000 - (cur_addr & 0xffff);
- if (bcount > cur_len)
- bcount = cur_len;
- *table++ = cpu_to_le32(cur_addr);
- xcount = bcount & 0xffff;
- if (is_trm290)
- xcount = ((xcount >> 2) - 1) << 16;
- else if (xcount == 0x0000) {
- if (count++ >= PRD_ENTRIES)
- goto use_pio_instead;
- *table++ = cpu_to_le32(0x8000);
- *table++ = cpu_to_le32(cur_addr + 0x8000);
- xcount = 0x8000;
- }
- *table++ = cpu_to_le32(xcount);
- cur_addr += bcount;
- cur_len -= bcount;
- }
- }
-
- if (count) {
- if (!is_trm290)
- *--table |= cpu_to_le32(0x80000000);
- return count;
- }
-
-use_pio_instead:
- printk(KERN_ERR "%s: %s\n", drive->name,
- count ? "DMA table too small" : "empty DMA table?");
-
- return 0; /* revert to PIO for this request */
-}
-EXPORT_SYMBOL_GPL(ide_build_dmatable);
-
-/**
- * ide_dma_setup - begin a DMA phase
- * @drive: target device
- * @cmd: command
- *
- * Build an IDE DMA PRD (IDE speak for scatter gather table)
- * and then set up the DMA transfer registers for a device
- * that follows generic IDE PCI DMA behaviour. Controllers can
- * override this function if they need to
- *
- * Returns 0 on success. If a PIO fallback is required then 1
- * is returned.
- */
-
-int ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
- u8 dma_stat;
-
- /* fall back to pio! */
- if (ide_build_dmatable(drive, cmd) == 0) {
- ide_map_sg(drive, cmd);
- return 1;
- }
-
- /* PRD table */
- if (mmio)
- writel(hwif->dmatable_dma,
- (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
- else
- outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
-
- /* specify r/w */
- if (mmio)
- writeb(rw, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- else
- outb(rw, hwif->dma_base + ATA_DMA_CMD);
-
- /* read DMA status for INTR & ERROR flags */
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- /* clear INTR & ERROR flags */
- ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_setup);
-
-/**
- * ide_dma_sff_timer_expiry - handle a DMA timeout
- * @drive: Drive that timed out
- *
- * An IDE DMA transfer timed out. In the event of an error we ask
- * the driver to resolve the problem; if a DMA transfer is still
- * in progress we continue to wait (arguably we need to add a
- * secondary 'I don't care what the drive thinks' timeout here).
- * Finally if we have an interrupt we let it complete the I/O.
- * But only one time - we clear expiry and if it's still not
- * completed after WAIT_CMD, we error and retry in PIO.
- * This can occur if an interrupt is lost or due to hang or bugs.
- */
-
-int ide_dma_sff_timer_expiry(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
- drive->name, __func__, dma_stat);
-
- if ((dma_stat & 0x18) == 0x18) /* BUSY Stupid Early Timer !! */
- return WAIT_CMD;
-
- hwif->expiry = NULL; /* one free ride for now */
-
- if (dma_stat & ATA_DMA_ERR) /* ERROR */
- return -1;
-
- if (dma_stat & ATA_DMA_ACTIVE) /* DMAing */
- return WAIT_CMD;
-
- if (dma_stat & ATA_DMA_INTR) /* Got an Interrupt */
- return WAIT_CMD;
-
- return 0; /* Status is unknown -- reset the bus */
-}
-EXPORT_SYMBOL_GPL(ide_dma_sff_timer_expiry);
-
-void ide_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_cmd;
-
- /* Note that this is done *after* the cmd has
- * been issued to the drive, as per the BM-IDE spec.
- * The Promise Ultra33 doesn't work correctly when
- * we do this part before issuing the drive cmd.
- */
- if (hwif->host_flags & IDE_HFLAG_MMIO) {
- dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- writeb(dma_cmd | ATA_DMA_START,
- (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- } else {
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
- }
-}
-EXPORT_SYMBOL_GPL(ide_dma_start);
-
-/* returns 1 on error, 0 otherwise */
-int ide_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = 0, dma_cmd = 0;
-
- /* stop DMA */
- if (hwif->host_flags & IDE_HFLAG_MMIO) {
- dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- writeb(dma_cmd & ~ATA_DMA_START,
- (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
- } else {
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
- }
-
- /* get DMA status */
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- /* clear INTR & ERROR bits */
- ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR);
-
-#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)
-
- /* verify good DMA status */
- if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR)
- return 0x10 | dma_stat;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_end);
-
-/* returns 1 if dma irq issued, 0 otherwise */
-int ide_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
-
- return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
-}
-EXPORT_SYMBOL_GPL(ide_dma_test_irq);
-
-const struct ide_dma_ops sff_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-EXPORT_SYMBOL_GPL(sff_dma_ops);
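
For reference, the PRD table that ide_build_dmatable() fills above is just pairs of little-endian 32-bit words, exactly as the two cpu_to_le32() stores suggest. An illustrative C view of one entry follows; the driver itself never defines such a struct, so this is only a reading aid:

/*
 * Illustrative layout of one bus-master IDE (SFF-8038i style) PRD entry,
 * matching how ide_build_dmatable() writes *table++ above.
 */
struct prd_entry {
	__le32 addr;		/* physical address of the buffer chunk */
	__le32 flags_len;	/* byte count in bits 15:0 (0x0000 read as 64 KiB
				 * by most chipsets); bit 31 set on the last entry */
};
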
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
deleted file mode 100644
index 6f344654ef22..000000000000
--- a/drivers/ide/ide-dma.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * IDE DMA support (including IDE PCI BM-DMA).
- *
- * Copyright (C) 1995-1998 Mark Lord
- * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
- */
-
-/*
- * Special Thanks to Mark for his Six years of work.
- */
-
-/*
- * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
- * fixing the problem with the BIOS on some Acer motherboards.
- *
- * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
- * "TX" chipset compatibility and for providing patches for the "TX" chipset.
- *
- * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
- * at generic DMA -- his patches were referred to when preparing this code.
- *
- * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
- * for supplying a Promise UDMA board & WD UDMA drive for this work!
- */
-
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-
-static const struct drive_list_entry drive_whitelist[] = {
- { "Micropolis 2112A" , NULL },
- { "CONNER CTMA 4000" , NULL },
- { "CONNER CTT8000-A" , NULL },
- { "ST34342A" , NULL },
- { NULL , NULL }
-};
-
-static const struct drive_list_entry drive_blacklist[] = {
- { "WDC AC11000H" , NULL },
- { "WDC AC22100H" , NULL },
- { "WDC AC32500H" , NULL },
- { "WDC AC33100H" , NULL },
- { "WDC AC31600H" , NULL },
- { "WDC AC32100H" , "24.09P07" },
- { "WDC AC23200L" , "21.10N21" },
- { "Compaq CRD-8241B" , NULL },
- { "CRD-8400B" , NULL },
- { "CRD-8480B", NULL },
- { "CRD-8482B", NULL },
- { "CRD-84" , NULL },
- { "SanDisk SDP3B" , NULL },
- { "SanDisk SDP3B-64" , NULL },
- { "SANYO CD-ROM CRD" , NULL },
- { "HITACHI CDR-8" , NULL },
- { "HITACHI CDR-8335" , NULL },
- { "HITACHI CDR-8435" , NULL },
- { "Toshiba CD-ROM XM-6202B" , NULL },
- { "TOSHIBA CD-ROM XM-1702BC", NULL },
- { "CD-532E-A" , NULL },
- { "E-IDE CD-ROM CR-840", NULL },
- { "CD-ROM Drive/F5A", NULL },
- { "WPI CDD-820", NULL },
- { "SAMSUNG CD-ROM SC-148C", NULL },
- { "SAMSUNG CD-ROM SC", NULL },
- { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
- { "_NEC DV5800A", NULL },
- { "SAMSUNG CD-ROM SN-124", "N001" },
- { "Seagate STT20000A", NULL },
- { "CD-ROM CDR_U200", "1.09" },
- { NULL , NULL }
-
-};
-
-/**
- * ide_dma_intr - IDE DMA interrupt handler
- * @drive: the drive the interrupt is for
- *
- * Handle an interrupt completing a read/write DMA transfer on an
- * IDE device
- */
-
-ide_startstop_t ide_dma_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- u8 stat = 0, dma_stat = 0;
-
- drive->waiting_for_dma = 0;
- dma_stat = hwif->dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- stat = hwif->tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
- if (!dma_stat) {
- if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
- ide_finish_cmd(drive, cmd, stat);
- else
- ide_complete_rq(drive, BLK_STS_OK,
- blk_rq_sectors(cmd->rq) << 9);
- return ide_stopped;
- }
- printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
- drive->name, __func__, dma_stat);
- }
- return ide_error(drive, "dma_intr", stat);
-}
-
-int ide_dma_good_drive(ide_drive_t *drive)
-{
- return ide_in_drive_list(drive->id, drive_whitelist);
-}
-
-/**
- * ide_dma_map_sg - map IDE scatter gather for DMA I/O
- * @drive: the drive to map the DMA table for
- * @cmd: command
- *
- * Perform the DMA mapping magic necessary to access the source or
- * target buffers of a request via DMA. The lower layers of the
- * kernel provide the necessary cache management so that we can
- * operate in a portable fashion.
- */
-
-static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table;
- int i;
-
- if (cmd->tf_flags & IDE_TFLAG_WRITE)
- cmd->sg_dma_direction = DMA_TO_DEVICE;
- else
- cmd->sg_dma_direction = DMA_FROM_DEVICE;
-
- i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
- if (i) {
- cmd->orig_sg_nents = cmd->sg_nents;
- cmd->sg_nents = i;
- }
-
- return i;
-}
-
-/**
- * ide_dma_unmap_sg - clean up DMA mapping
- * @drive: The drive to unmap
- *
- * Teardown mappings after DMA has completed. This must be called
- * after the completion of each use of ide_build_dmatable and before
- * the next use of ide_build_dmatable. Failure to do so will cause
- * an oops as only one mapping can be live for each target at a given
- * time.
- */
-
-void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
- cmd->sg_dma_direction);
-}
-EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);
-
-/**
- * ide_dma_off_quietly - Generic DMA kill
- * @drive: drive to control
- *
- * Turn off the current DMA on this IDE controller.
- */
-
-void ide_dma_off_quietly(ide_drive_t *drive)
-{
- drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
-
- drive->hwif->dma_ops->dma_host_set(drive, 0);
-}
-EXPORT_SYMBOL(ide_dma_off_quietly);
-
-/**
- * ide_dma_off - disable DMA on a device
- * @drive: drive to disable DMA on
- *
- * Disable IDE DMA for a device on this IDE controller.
- * Inform the user that DMA has been disabled.
- */
-
-void ide_dma_off(ide_drive_t *drive)
-{
- printk(KERN_INFO "%s: DMA disabled\n", drive->name);
- ide_dma_off_quietly(drive);
-}
-EXPORT_SYMBOL(ide_dma_off);
-
-/**
- * ide_dma_on - Enable DMA on a device
- * @drive: drive to enable DMA on
- *
- * Enable IDE DMA for a device on this IDE controller.
- */
-
-void ide_dma_on(ide_drive_t *drive)
-{
- drive->dev_flags |= IDE_DFLAG_USING_DMA;
-
- drive->hwif->dma_ops->dma_host_set(drive, 1);
-}
-
-int __ide_dma_bad_drive(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- int blacklist = ide_in_drive_list(id, drive_blacklist);
- if (blacklist) {
- printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
- drive->name, (char *)&id[ATA_ID_PROD]);
- return blacklist;
- }
- return 0;
-}
-EXPORT_SYMBOL(__ide_dma_bad_drive);
-
-static const u8 xfer_mode_bases[] = {
- XFER_UDMA_0,
- XFER_MW_DMA_0,
- XFER_SW_DMA_0,
-};
-
-static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
-{
- u16 *id = drive->id;
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- unsigned int mask = 0;
-
- switch (base) {
- case XFER_UDMA_0:
- if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
- break;
- mask = id[ATA_ID_UDMA_MODES];
- if (port_ops && port_ops->udma_filter)
- mask &= port_ops->udma_filter(drive);
- else
- mask &= hwif->ultra_mask;
-
- /*
- * avoid false cable warning from eighty_ninty_three()
- */
- if (req_mode > XFER_UDMA_2) {
- if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
- mask &= 0x07;
- }
- break;
- case XFER_MW_DMA_0:
- mask = id[ATA_ID_MWDMA_MODES];
-
- /* Also look for the CF specific MWDMA modes... */
- if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
- u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;
-
- mask |= ((2 << mode) - 1) << 3;
- }
-
- if (port_ops && port_ops->mdma_filter)
- mask &= port_ops->mdma_filter(drive);
- else
- mask &= hwif->mwdma_mask;
- break;
- case XFER_SW_DMA_0:
- mask = id[ATA_ID_SWDMA_MODES];
- if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
- u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
-
- /*
- * if the mode is valid convert it to the mask
- * (the maximum allowed mode is XFER_SW_DMA_2)
- */
- if (mode <= 2)
- mask = (2 << mode) - 1;
- }
- mask &= hwif->swdma_mask;
- break;
- default:
- BUG();
- break;
- }
-
- return mask;
-}
-
-/**
- * ide_find_dma_mode - compute DMA speed
- * @drive: IDE device
- * @req_mode: requested mode
- *
- * Checks the drive/host capabilities and finds the speed to use for
- * the DMA transfer. The speed is then limited by the requested mode.
- *
- * Returns 0 if the drive/host combination is incapable of DMA transfers
- * or if the requested mode is not a DMA mode.
- */
-
-u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int mask;
- int x, i;
- u8 mode = 0;
-
- if (drive->media != ide_disk) {
- if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
- if (req_mode < xfer_mode_bases[i])
- continue;
- mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
- x = fls(mask) - 1;
- if (x >= 0) {
- mode = xfer_mode_bases[i] + x;
- break;
- }
- }
-
- if (hwif->chipset == ide_acorn && mode == 0) {
- /*
- * is this correct?
- */
- if (ide_dma_good_drive(drive) &&
- drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
- mode = XFER_MW_DMA_1;
- }
-
- mode = min(mode, req_mode);
-
- printk(KERN_INFO "%s: %s mode selected\n", drive->name,
- mode ? ide_xfer_verbose(mode) : "no DMA");
-
- return mode;
-}
-
-static int ide_tune_dma(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 speed;
-
- if (ata_id_has_dma(drive->id) == 0 ||
- (drive->dev_flags & IDE_DFLAG_NODMA))
- return 0;
-
- /* consult the list of known "bad" drives */
- if (__ide_dma_bad_drive(drive))
- return 0;
-
- if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
- return config_drive_for_dma(drive);
-
- speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- if (ide_set_dma_mode(drive, speed))
- return 0;
-
- return 1;
-}
-
-static int ide_dma_check(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if (ide_tune_dma(drive))
- return 0;
-
- /* TODO: always do PIO fallback */
- if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
- return -1;
-
- ide_set_max_pio(drive);
-
- return -1;
-}
-
-int ide_set_dma(ide_drive_t *drive)
-{
- int rc;
-
- /*
- * Force DMAing for the beginning of the check.
- * Some chipsets appear to do interesting
- * things, if not checked and cleared.
- * PARANOIA!!!
- */
- ide_dma_off_quietly(drive);
-
- rc = ide_dma_check(drive);
- if (rc)
- return rc;
-
- ide_dma_on(drive);
-
- return 0;
-}
-
-void ide_check_dma_crc(ide_drive_t *drive)
-{
- u8 mode;
-
- ide_dma_off_quietly(drive);
- drive->crc_count = 0;
- mode = drive->current_speed;
- /*
- * Don't try non Ultra-DMA modes without iCRC's. Force the
- * device to PIO and make the user enable SWDMA/MWDMA modes.
- */
- if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
- mode--;
- else
- mode = XFER_PIO_4;
- ide_set_xfer_rate(drive, mode);
- if (drive->current_speed >= XFER_SW_DMA_0)
- ide_dma_on(drive);
-}
-
-void ide_dma_lost_irq(ide_drive_t *drive)
-{
- printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
-}
-EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
-
-/*
- * Un-busy the port etc., and clear any pending DMA status. We want to
- * retry the current request in PIO mode instead of risking tossing it
- * all away.
- */
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_dma_ops *dma_ops = hwif->dma_ops;
- struct ide_cmd *cmd = &hwif->cmd;
- ide_startstop_t ret = ide_stopped;
-
- /*
- * end current dma transaction
- */
-
- if (error < 0) {
- printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
- drive->waiting_for_dma = 0;
- (void)dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- ret = ide_error(drive, "dma timeout error",
- hwif->tp_ops->read_status(hwif));
- } else {
- printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
- if (dma_ops->dma_clear)
- dma_ops->dma_clear(drive);
- printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
- if (dma_ops->dma_test_irq(drive) == 0) {
- ide_dump_status(drive, "DMA timeout",
- hwif->tp_ops->read_status(hwif));
- drive->waiting_for_dma = 0;
- (void)dma_ops->dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- }
- }
-
- /*
- * disable dma for now, but remember that we did so because of
- * a timeout -- we'll reenable after we finish this next request
- * (or rather the first chunk of it) in pio.
- */
- drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
- drive->retry_pio++;
- ide_dma_off_quietly(drive);
-
- /*
- * make sure request is sane
- */
- if (hwif->rq)
- scsi_req(hwif->rq)->result = 0;
- return ret;
-}
-
-void ide_release_dma_engine(ide_hwif_t *hwif)
-{
- if (hwif->dmatable_cpu) {
- int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
-
- dma_free_coherent(hwif->dev, prd_size,
- hwif->dmatable_cpu, hwif->dmatable_dma);
- hwif->dmatable_cpu = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(ide_release_dma_engine);
-
-int ide_allocate_dma_engine(ide_hwif_t *hwif)
-{
- int prd_size;
-
- if (hwif->prd_max_nents == 0)
- hwif->prd_max_nents = PRD_ENTRIES;
- if (hwif->prd_ent_size == 0)
- hwif->prd_ent_size = PRD_BYTES;
-
- prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
-
- hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
- &hwif->dmatable_dma,
- GFP_ATOMIC);
- if (hwif->dmatable_cpu == NULL) {
- printk(KERN_ERR "%s: unable to allocate PRD table\n",
- hwif->name);
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
-
-int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;
-
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
- (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
- goto out;
- ide_map_sg(drive, cmd);
- if (ide_dma_map_sg(drive, cmd) == 0)
- goto out_map;
- if (dma_ops->dma_setup(drive, cmd))
- goto out_dma_unmap;
- drive->waiting_for_dma = 1;
- return 0;
-out_dma_unmap:
- ide_dma_unmap_sg(drive, cmd);
-out_map:
- ide_map_sg(drive, cmd);
-out:
- return 1;
-}
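
ide_find_dma_mode() above simply takes the highest set bit of the combined drive/host mode mask via fls() and offsets it from the mode base. A worked illustration of that step, with hypothetical mask values not taken from any real probe:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/ata.h>

/* Sketch of the fls()-based pick used by ide_find_dma_mode() above. */
static u8 pick_udma_mode(u16 drive_udma_modes, u8 host_ultra_mask)
{
	unsigned int mask = drive_udma_modes & host_ultra_mask;
	int x = fls(mask) - 1;

	/* e.g. modes 0-5 (0x3f) on a UDMA-4 host (0x1f) -> XFER_UDMA_4 */
	return x >= 0 ? XFER_UDMA_0 + x : 0;
}
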
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
deleted file mode 100644
index 2f378213e9b5..000000000000
--- a/drivers/ide/ide-eh.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/delay.h>
-
-static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
- u8 stat, u8 err)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if ((stat & ATA_BUSY) ||
- ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
- /* other bits are useless when BUSY */
- scsi_req(rq)->result |= ERROR_RESET;
- } else if (stat & ATA_ERR) {
- /* err has different meaning on cdrom and tape */
- if (err == ATA_ABORTED) {
- if ((drive->dev_flags & IDE_DFLAG_LBA) &&
- /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
- hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
- return ide_stopped;
- } else if ((err & BAD_CRC) == BAD_CRC) {
- /* UDMA crc error, just retry the operation */
- drive->crc_count++;
- } else if (err & (ATA_BBK | ATA_UNC)) {
- /* retries won't help these */
- scsi_req(rq)->result = ERROR_MAX;
- } else if (err & ATA_TRK0NF) {
- /* help it find track zero */
- scsi_req(rq)->result |= ERROR_RECAL;
- }
- }
-
- if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
- (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
- int nsect = drive->mult_count ? drive->mult_count : 1;
-
- ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
- }
-
- if (scsi_req(rq)->result >= ERROR_MAX || blk_noretry_request(rq)) {
- ide_kill_rq(drive, rq);
- return ide_stopped;
- }
-
- if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
- scsi_req(rq)->result |= ERROR_RESET;
-
- if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
- ++scsi_req(rq)->result;
- return ide_do_reset(drive);
- }
-
- if ((scsi_req(rq)->result & ERROR_RECAL) == ERROR_RECAL)
- drive->special_flags |= IDE_SFLAG_RECALIBRATE;
-
- ++scsi_req(rq)->result;
-
- return ide_stopped;
-}
-
-static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
- u8 stat, u8 err)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if ((stat & ATA_BUSY) ||
- ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
- /* other bits are useless when BUSY */
- scsi_req(rq)->result |= ERROR_RESET;
- } else {
- /* add decoding error stuff */
- }
-
- if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
- /* force an abort */
- hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
-
- if (scsi_req(rq)->result >= ERROR_MAX) {
- ide_kill_rq(drive, rq);
- } else {
- if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
- ++scsi_req(rq)->result;
- return ide_do_reset(drive);
- }
- ++scsi_req(rq)->result;
- }
-
- return ide_stopped;
-}
-
-static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
- u8 stat, u8 err)
-{
- if (drive->media == ide_disk)
- return ide_ata_error(drive, rq, stat, err);
- return ide_atapi_error(drive, rq, stat, err);
-}
-
-/**
- * ide_error - handle an error on the IDE
- * @drive: drive the error occurred on
- * @msg: message to report
- * @stat: status bits
- *
- * ide_error() takes action based on the error returned by the drive.
- * For normal I/O that may well include retries. We deal with
- * both new-style (taskfile) and old-style command handling here.
- * In the case of taskfile command handling there is work left to
- * do.
- */
-
-ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
-{
- struct request *rq;
- u8 err;
-
- err = ide_dump_status(drive, msg, stat);
-
- rq = drive->hwif->rq;
- if (rq == NULL)
- return ide_stopped;
-
- /* retry only "normal" I/O: */
- if (blk_rq_is_passthrough(rq)) {
- if (ata_taskfile_request(rq)) {
- struct ide_cmd *cmd = ide_req(rq)->special;
-
- if (cmd)
- ide_complete_cmd(drive, cmd, stat, err);
- } else if (ata_pm_request(rq)) {
- scsi_req(rq)->result = 1;
- ide_complete_pm_rq(drive, rq);
- return ide_stopped;
- }
- scsi_req(rq)->result = err;
- ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
- return ide_stopped;
- }
-
- return __ide_error(drive, rq, stat, err);
-}
-EXPORT_SYMBOL_GPL(ide_error);
-
-static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
-{
- struct request *rq = drive->hwif->rq;
-
- if (rq && ata_misc_request(rq) &&
- scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
- if (err <= 0 && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- ide_complete_rq(drive, err, blk_rq_bytes(rq));
- }
-}
-
-/* needed below */
-static ide_startstop_t do_reset1(ide_drive_t *, int);
-
-/*
- * atapi_reset_pollfunc() gets invoked to poll the interface for completion
- * every 50ms during an atapi drive reset operation. If the drive has not yet
- * responded, and we have not yet hit our maximum waiting time, then the timer
- * is restarted for another 50ms.
- */
-static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- u8 stat;
-
- tp_ops->dev_select(drive);
- udelay(10);
- stat = tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, 0, ATA_BUSY))
- printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
- else {
- if (time_before(jiffies, hwif->poll_timeout)) {
- ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
- /* continue polling */
- return ide_started;
- }
- /* end of polling */
- hwif->polling = 0;
- printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
- drive->name, stat);
- /* do it the old fashioned way */
- return do_reset1(drive, 1);
- }
- /* done polling */
- hwif->polling = 0;
- ide_complete_drive_reset(drive, BLK_STS_OK);
- return ide_stopped;
-}
-
-static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
-{
- static const char *err_master_vals[] =
- { NULL, "passed", "formatter device error",
- "sector buffer error", "ECC circuitry error",
- "controlling MPU error" };
-
- u8 err_master = err & 0x7f;
-
- printk(KERN_ERR "%s: reset: master: ", hwif->name);
- if (err_master && err_master < 6)
- printk(KERN_CONT "%s", err_master_vals[err_master]);
- else
- printk(KERN_CONT "error (0x%02x?)", err);
- if (err & 0x80)
- printk(KERN_CONT "; slave: failed");
- printk(KERN_CONT "\n");
-}
-
-/*
- * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
- * during an ide reset operation. If the drives have not yet responded,
- * and we have not yet hit our maximum waiting time, then the timer is restarted
- * for another 50ms.
- */
-static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- u8 tmp;
- blk_status_t err = BLK_STS_OK;
-
- if (port_ops && port_ops->reset_poll) {
- err = port_ops->reset_poll(drive);
- if (err) {
- printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
- hwif->name, drive->name);
- goto out;
- }
- }
-
- tmp = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(tmp, 0, ATA_BUSY)) {
- if (time_before(jiffies, hwif->poll_timeout)) {
- ide_set_handler(drive, &reset_pollfunc, HZ/20);
- /* continue polling */
- return ide_started;
- }
- printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
- hwif->name, tmp);
- drive->failures++;
- err = BLK_STS_IOERR;
- } else {
- tmp = ide_read_error(drive);
-
- if (tmp == 1) {
- printk(KERN_INFO "%s: reset: success\n", hwif->name);
- drive->failures = 0;
- } else {
- ide_reset_report_error(hwif, tmp);
- drive->failures++;
- err = BLK_STS_IOERR;
- }
- }
-out:
- hwif->polling = 0; /* done polling */
- ide_complete_drive_reset(drive, err);
- return ide_stopped;
-}
-
-static void ide_disk_pre_reset(ide_drive_t *drive)
-{
- int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
-
- drive->special_flags =
- legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;
-
- drive->mult_count = 0;
- drive->dev_flags &= ~IDE_DFLAG_PARKED;
-
- if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
- (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
- drive->mult_req = 0;
-
- if (drive->mult_req != drive->mult_count)
- drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
-}
-
-static void pre_reset(ide_drive_t *drive)
-{
- const struct ide_port_ops *port_ops = drive->hwif->port_ops;
-
- if (drive->media == ide_disk)
- ide_disk_pre_reset(drive);
- else
- drive->dev_flags |= IDE_DFLAG_POST_RESET;
-
- if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
- if (drive->crc_count)
- ide_check_dma_crc(drive);
- else
- ide_dma_off(drive);
- }
-
- if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
- drive->io_32bit = 0;
- }
- return;
- }
-
- if (port_ops && port_ops->pre_reset)
- port_ops->pre_reset(drive);
-
- if (drive->current_speed != 0xff)
- drive->desired_speed = drive->current_speed;
- drive->current_speed = 0xff;
-}
-
-/*
- * do_reset1() attempts to recover a confused drive by resetting it.
- * Unfortunately, resetting a disk drive actually resets all devices on
- * the same interface, so it can really be thought of as resetting the
- * interface rather than resetting the drive.
- *
- * ATAPI devices have their own reset mechanism which allows them to be
- * individually reset without clobbering other devices on the same interface.
- *
- * Unfortunately, the IDE interface does not generate an interrupt to let
- * us know when the reset operation has finished, so we must poll for this.
- * Equally poor, though, is the fact that this may take a very long time to
- * complete (up to 30 seconds worst case). So, instead of busy-waiting here for it,
- * we set a timer to poll at 50ms intervals.
- */
-static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- const struct ide_port_ops *port_ops;
- ide_drive_t *tdrive;
- unsigned long flags, timeout;
- int i;
- DEFINE_WAIT(wait);
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- /* We must not reset with running handlers */
- BUG_ON(hwif->handler != NULL);
-
- /* For an ATAPI device, first try an ATAPI SRST. */
- if (drive->media != ide_disk && !do_not_try_atapi) {
- pre_reset(drive);
- tp_ops->dev_select(drive);
- udelay(20);
- tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
- ndelay(400);
- hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
- hwif->polling = 1;
- __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return ide_started;
- }
-
- /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
- do {
- unsigned long now;
-
- prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
- timeout = jiffies;
- ide_port_for_each_present_dev(i, tdrive, hwif) {
- if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
- time_after(tdrive->sleep, timeout))
- timeout = tdrive->sleep;
- }
-
- now = jiffies;
- if (time_before_eq(timeout, now))
- break;
-
- spin_unlock_irqrestore(&hwif->lock, flags);
- timeout = schedule_timeout_uninterruptible(timeout - now);
- spin_lock_irqsave(&hwif->lock, flags);
- } while (timeout);
- finish_wait(&ide_park_wq, &wait);
-
- /*
- * First, reset any device state data we were maintaining
- * for any of the drives on this interface.
- */
- ide_port_for_each_dev(i, tdrive, hwif)
- pre_reset(tdrive);
-
- if (io_ports->ctl_addr == 0) {
- spin_unlock_irqrestore(&hwif->lock, flags);
- ide_complete_drive_reset(drive, BLK_STS_IOERR);
- return ide_stopped;
- }
-
- /*
- * Note that we also set nIEN while resetting the device,
- * to mask unwanted interrupts from the interface during the reset.
- * However, due to the design of PC hardware, this will cause an
- * immediate interrupt due to the edge transition it produces.
- * This single interrupt gives us a "fast poll" for drives that
- * recover from reset very quickly, saving us the first 50ms wait time.
- */
- /* set SRST and nIEN */
- tp_ops->write_devctl(hwif, ATA_SRST | ATA_NIEN | ATA_DEVCTL_OBS);
- /* more than enough time */
- udelay(10);
- /* clear SRST, leave nIEN (unless device is on the quirk list) */
- tp_ops->write_devctl(hwif,
- ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
- ATA_DEVCTL_OBS);
- /* more than enough time */
- udelay(10);
- hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
- hwif->polling = 1;
- __ide_set_handler(drive, &reset_pollfunc, HZ/20);
-
- /*
-	 * Some weird controllers like resetting themselves to a strange
-	 * state when the disks are reset this way. At least, the Winbond
-	 * 553 documentation says so.
- */
- port_ops = hwif->port_ops;
- if (port_ops && port_ops->resetproc)
- port_ops->resetproc(drive);
-
- spin_unlock_irqrestore(&hwif->lock, flags);
- return ide_started;
-}
-
-/*
- * ide_do_reset() is the entry point to the drive/interface reset code.
- */
-
-ide_startstop_t ide_do_reset(ide_drive_t *drive)
-{
- return do_reset1(drive, 0);
-}
-EXPORT_SYMBOL(ide_do_reset);
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
deleted file mode 100644
index f5a2870aaf54..000000000000
--- a/drivers/ide/ide-floppy.c
+++ /dev/null
@@ -1,551 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IDE ATAPI floppy driver.
- *
- * Copyright (C) 1996-1999 Gadi Oxman <gadio@netvision.net.il>
- * Copyright (C) 2000-2002 Paul Bristow <paul@paulbristow.net>
- * Copyright (C) 2005 Bartlomiej Zolnierkiewicz
- *
- * This driver supports the following IDE floppy drives:
- *
- * LS-120/240 SuperDisk
- * Iomega Zip 100/250
- * Iomega PC Card Clik!/PocketZip
- *
- * For a historical changelog see
- * Documentation/ide/ChangeLog.ide-floppy.1996-2002
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/compat.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/cdrom.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-
-#include <scsi/scsi_ioctl.h>
-
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <asm/unaligned.h>
-
-#include "ide-floppy.h"
-
-/*
- * After each failed packet command we issue a request sense command and retry
- * the packet command IDEFLOPPY_MAX_PC_RETRIES times.
- */
-#define IDEFLOPPY_MAX_PC_RETRIES 3
-
-/* format capacities descriptor codes */
-#define CAPACITY_INVALID 0x00
-#define CAPACITY_UNFORMATTED 0x01
-#define CAPACITY_CURRENT 0x02
-#define CAPACITY_NO_CARTRIDGE 0x03
-
-/*
- * The following delay solves a problem with the ATAPI Zip 100 drive where the
- * BSY bit was apparently being deasserted before the unit was ready to receive
- * data.
- */
-#define IDEFLOPPY_PC_DELAY (HZ/20) /* default delay for ZIP 100 (50ms) */
-
-static int ide_floppy_callback(ide_drive_t *drive, int dsc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct ide_atapi_pc *pc = drive->pc;
- struct request *rq = pc->rq;
- int uptodate = pc->error ? 0 : 1;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (drive->failed_pc == pc)
- drive->failed_pc = NULL;
-
- if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- blk_rq_is_scsi(rq))
- uptodate = 1; /* FIXME */
- else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
-
- u8 *buf = bio_data(rq->bio);
-
- if (!pc->error) {
- floppy->sense_key = buf[2] & 0x0F;
- floppy->asc = buf[12];
- floppy->ascq = buf[13];
- floppy->progress_indication = buf[15] & 0x80 ?
- (u16)get_unaligned((u16 *)&buf[16]) : 0x10000;
-
- if (drive->failed_pc)
- ide_debug_log(IDE_DBG_PC, "pc = %x",
- drive->failed_pc->c[0]);
-
- ide_debug_log(IDE_DBG_SENSE, "sense key = %x, asc = %x,"
- "ascq = %x", floppy->sense_key,
- floppy->asc, floppy->ascq);
- } else
- printk(KERN_ERR PFX "Error in REQUEST SENSE itself - "
- "Aborting request!\n");
- }
-
- if (ata_misc_request(rq))
- scsi_req(rq)->result = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
-
- return uptodate;
-}
-
-static void ide_floppy_report_error(struct ide_disk_obj *floppy,
- struct ide_atapi_pc *pc)
-{
- /* suppress error messages resulting from Medium not present */
- if (floppy->sense_key == 0x02 &&
- floppy->asc == 0x3a &&
- floppy->ascq == 0x00)
- return;
-
- printk(KERN_ERR PFX "%s: I/O error, pc = %2x, key = %2x, "
- "asc = %2x, ascq = %2x\n",
- floppy->drive->name, pc->c[0], floppy->sense_key,
- floppy->asc, floppy->ascq);
-
-}
-
-static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
- struct ide_cmd *cmd,
- struct ide_atapi_pc *pc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
-
- if (drive->failed_pc == NULL &&
- pc->c[0] != GPCMD_REQUEST_SENSE)
- drive->failed_pc = pc;
-
- /* Set the current packet command */
- drive->pc = pc;
-
- if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
- unsigned int done = blk_rq_bytes(drive->hwif->rq);
-
- if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
- ide_floppy_report_error(floppy, pc);
-
- /* Giving up */
- pc->error = IDE_DRV_ERROR_GENERAL;
-
- drive->failed_pc = NULL;
- drive->pc_callback(drive, 0);
- ide_complete_rq(drive, BLK_STS_IOERR, done);
- return ide_stopped;
- }
-
- ide_debug_log(IDE_DBG_FUNC, "retry #%d", pc->retries);
-
- pc->retries++;
-
- return ide_issue_pc(drive, cmd);
-}
-
-void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = GPCMD_READ_FORMAT_CAPACITIES;
- pc->c[7] = 255;
- pc->c[8] = 255;
- pc->req_xfer = 255;
-}
-
-/* A mode sense command is used to "sense" floppy parameters. */
-void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
-{
- u16 length = 8; /* sizeof(Mode Parameter Header) = 8 Bytes */
-
- ide_init_pc(pc);
- pc->c[0] = GPCMD_MODE_SENSE_10;
- pc->c[1] = 0;
- pc->c[2] = page_code;
-
- switch (page_code) {
- case IDEFLOPPY_CAPABILITIES_PAGE:
- length += 12;
- break;
- case IDEFLOPPY_FLEXIBLE_DISK_PAGE:
- length += 32;
- break;
- default:
- printk(KERN_ERR PFX "unsupported page code in %s\n", __func__);
- }
- put_unaligned(cpu_to_be16(length), (u16 *) &pc->c[7]);
- pc->req_xfer = length;
-}
-
-static void idefloppy_create_rw_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc, struct request *rq,
- unsigned long sector)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- int block = sector / floppy->bs_factor;
- int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
- int cmd = rq_data_dir(rq);
-
- ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
-
- ide_init_pc(pc);
- pc->c[0] = cmd == READ ? GPCMD_READ_10 : GPCMD_WRITE_10;
- put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
- put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
-
- memcpy(scsi_req(rq)->cmd, pc->c, 12);
-
- pc->rq = rq;
- if (cmd == WRITE)
- pc->flags |= PC_FLAG_WRITING;
-
- pc->flags |= PC_FLAG_DMA_OK;
-}
-
-static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
- struct ide_atapi_pc *pc, struct request *rq)
-{
- ide_init_pc(pc);
- memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
- pc->rq = rq;
- if (blk_rq_bytes(rq)) {
- pc->flags |= PC_FLAG_DMA_OK;
- if (rq_data_dir(rq) == WRITE)
- pc->flags |= PC_FLAG_WRITING;
- }
-}
-
-static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
- struct request *rq, sector_t block)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct ide_cmd cmd;
- struct ide_atapi_pc *pc;
-
- ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]);
-
- if (drive->debug_mask & IDE_DBG_RQ)
- blk_dump_rq_flags(rq, (rq->rq_disk
- ? rq->rq_disk->disk_name
- : "dev?"));
-
- if (scsi_req(rq)->result >= ERROR_MAX) {
- if (drive->failed_pc) {
- ide_floppy_report_error(floppy, drive->failed_pc);
- drive->failed_pc = NULL;
- } else
- printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
-
- if (ata_misc_request(rq)) {
- scsi_req(rq)->result = 0;
- ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
- return ide_stopped;
- } else
- goto out_end;
- }
-
- switch (req_op(rq)) {
- default:
- if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
- (blk_rq_sectors(rq) % floppy->bs_factor)) {
- printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
- drive->name);
- goto out_end;
- }
- pc = &floppy->queued_pc;
- idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
- break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- pc = &floppy->queued_pc;
- idefloppy_blockpc_cmd(floppy, pc, rq);
- break;
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- switch (ide_req(rq)->type) {
- case ATA_PRIV_MISC:
- case ATA_PRIV_SENSE:
- pc = (struct ide_atapi_pc *)ide_req(rq)->special;
- break;
- default:
- BUG();
- }
- }
-
- ide_prep_sense(drive, rq);
-
- memset(&cmd, 0, sizeof(cmd));
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- cmd.rq = rq;
-
- if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
- ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
- ide_map_sg(drive, &cmd);
- }
-
- pc->rq = rq;
-
- return ide_floppy_issue_pc(drive, &cmd, pc);
-out_end:
- drive->failed_pc = NULL;
- if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
- return ide_stopped;
-}
-
-/*
- * Look at the flexible disk page parameters. We ignore the CHS capacity
- * parameters and use the LBA parameters instead.
- */
-static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive,
- struct ide_atapi_pc *pc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct gendisk *disk = floppy->disk;
- u8 *page, buf[40];
- int capacity, lba_capacity;
- u16 transfer_rate, sector_size, cyls, rpm;
- u8 heads, sectors;
-
- ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_FLEXIBLE_DISK_PAGE);
-
- if (ide_queue_pc_tail(drive, disk, pc, buf, pc->req_xfer)) {
- printk(KERN_ERR PFX "Can't get flexible disk page params\n");
- return 1;
- }
-
- if (buf[3] & 0x80)
- drive->dev_flags |= IDE_DFLAG_WP;
- else
- drive->dev_flags &= ~IDE_DFLAG_WP;
-
- set_disk_ro(disk, !!(drive->dev_flags & IDE_DFLAG_WP));
-
- page = &buf[8];
-
- transfer_rate = be16_to_cpup((__be16 *)&buf[8 + 2]);
- sector_size = be16_to_cpup((__be16 *)&buf[8 + 6]);
- cyls = be16_to_cpup((__be16 *)&buf[8 + 8]);
- rpm = be16_to_cpup((__be16 *)&buf[8 + 28]);
- heads = buf[8 + 4];
- sectors = buf[8 + 5];
-
- capacity = cyls * heads * sectors * sector_size;
-
- if (memcmp(page, &floppy->flexible_disk_page, 32))
- printk(KERN_INFO PFX "%s: %dkB, %d/%d/%d CHS, %d kBps, "
- "%d sector size, %d rpm\n",
- drive->name, capacity / 1024, cyls, heads,
- sectors, transfer_rate / 8, sector_size, rpm);
-
- memcpy(&floppy->flexible_disk_page, page, 32);
- drive->bios_cyl = cyls;
- drive->bios_head = heads;
- drive->bios_sect = sectors;
- lba_capacity = floppy->blocks * floppy->block_size;
-
- if (capacity < lba_capacity) {
- printk(KERN_NOTICE PFX "%s: The disk reports a capacity of %d "
- "bytes, but the drive only handles %d\n",
- drive->name, lba_capacity, capacity);
- floppy->blocks = floppy->block_size ?
- capacity / floppy->block_size : 0;
- drive->capacity64 = floppy->blocks * floppy->bs_factor;
- }
-
- return 0;
-}
-
-/*
- * Determine if a media is present in the floppy drive, and if so, its LBA
- * capacity.
- */
-static int ide_floppy_get_capacity(ide_drive_t *drive)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct gendisk *disk = floppy->disk;
- struct ide_atapi_pc pc;
- u8 *cap_desc;
- u8 pc_buf[256], header_len, desc_cnt;
- int i, rc = 1, blocks, length;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- drive->bios_cyl = 0;
- drive->bios_head = drive->bios_sect = 0;
- floppy->blocks = 0;
- floppy->bs_factor = 1;
- drive->capacity64 = 0;
-
- ide_floppy_create_read_capacity_cmd(&pc);
- if (ide_queue_pc_tail(drive, disk, &pc, pc_buf, pc.req_xfer)) {
- printk(KERN_ERR PFX "Can't get floppy parameters\n");
- return 1;
- }
- header_len = pc_buf[3];
- cap_desc = &pc_buf[4];
- desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */
-
- for (i = 0; i < desc_cnt; i++) {
- unsigned int desc_start = 4 + i*8;
-
- blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]);
- length = be16_to_cpup((__be16 *)&pc_buf[desc_start + 6]);
-
- ide_debug_log(IDE_DBG_PROBE, "Descriptor %d: %dkB, %d blocks, "
- "%d sector size",
- i, blocks * length / 1024,
- blocks, length);
-
- if (i)
- continue;
- /*
- * the code below is valid only for the 1st descriptor, ie i=0
- */
-
- switch (pc_buf[desc_start + 4] & 0x03) {
- /* Clik! drive returns this instead of CAPACITY_CURRENT */
- case CAPACITY_UNFORMATTED:
- if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
- /*
- * If it is not a clik drive, break out
- * (maintains previous driver behaviour)
- */
- break;
- fallthrough;
- case CAPACITY_CURRENT:
- /* Normal Zip/LS-120 disks */
- if (memcmp(cap_desc, &floppy->cap_desc, 8))
- printk(KERN_INFO PFX "%s: %dkB, %d blocks, %d "
- "sector size\n",
- drive->name, blocks * length / 1024,
- blocks, length);
- memcpy(&floppy->cap_desc, cap_desc, 8);
-
- if (!length || length % 512) {
- printk(KERN_NOTICE PFX "%s: %d bytes block size"
- " not supported\n", drive->name, length);
- } else {
- floppy->blocks = blocks;
- floppy->block_size = length;
- floppy->bs_factor = length / 512;
- if (floppy->bs_factor != 1)
- printk(KERN_NOTICE PFX "%s: Warning: "
- "non 512 bytes block size not "
- "fully supported\n",
- drive->name);
- drive->capacity64 =
- floppy->blocks * floppy->bs_factor;
- rc = 0;
- }
- break;
- case CAPACITY_NO_CARTRIDGE:
- /*
- * This is a KERN_ERR so it appears on screen
- * for the user to see
- */
- printk(KERN_ERR PFX "%s: No disk in drive\n",
- drive->name);
- break;
- case CAPACITY_INVALID:
- printk(KERN_ERR PFX "%s: Invalid capacity for disk "
- "in drive\n", drive->name);
- break;
- }
- ide_debug_log(IDE_DBG_PROBE, "Descriptor 0 Code: %d",
- pc_buf[desc_start + 4] & 0x03);
- }
-
- /* Clik! disk does not support get_flexible_disk_page */
- if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
- (void) ide_floppy_get_flexible_disk_page(drive, &pc);
-
- return rc;
-}
-
-static void ide_floppy_setup(ide_drive_t *drive)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u16 *id = drive->id;
-
- drive->pc_callback = ide_floppy_callback;
-
- /*
- * We used to check revisions here. At this point, however, I'm giving up.
- * Just assume they are all broken; it's easier.
- *
- * The actual reason for the workarounds was likely a driver bug after
- * all rather than a firmware bug, and the workaround below used to hide
- * it. It should be fixed as of version 1.9, but to be on the safe side
- * we'll leave the limitation below for the 2.2.x tree.
- */
- if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI")) {
- drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
- /* This value will be visible in the /proc/ide/hdx/settings */
- drive->pc_delay = IDEFLOPPY_PC_DELAY;
- blk_queue_max_hw_sectors(drive->queue, 64);
- }
-
- /*
- * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
- * nasty clicking noises without it, so please don't remove this.
- */
- if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA Clik!")) {
- blk_queue_max_hw_sectors(drive->queue, 64);
- drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
- /* IOMEGA Clik! drives do not support lock/unlock commands */
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
- }
-
- (void) ide_floppy_get_capacity(drive);
-
- ide_proc_register_driver(drive, floppy->driver);
-}
-
-static void ide_floppy_flush(ide_drive_t *drive)
-{
-}
-
-static int ide_floppy_init_media(ide_drive_t *drive, struct gendisk *disk)
-{
- int ret = 0;
-
- if (ide_do_test_unit_ready(drive, disk))
- ide_do_start_stop(drive, disk, 1);
-
- ret = ide_floppy_get_capacity(drive);
-
- set_capacity(disk, ide_gd_capacity(drive));
-
- return ret;
-}
-
-const struct ide_disk_ops ide_atapi_disk_ops = {
- .check = ide_check_atapi_device,
- .get_capacity = ide_floppy_get_capacity,
- .setup = ide_floppy_setup,
- .flush = ide_floppy_flush,
- .init_media = ide_floppy_init_media,
- .set_doorlock = ide_set_media_lock,
- .do_request = ide_floppy_do_request,
- .ioctl = ide_floppy_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ide_floppy_compat_ioctl,
-#endif
-};
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
deleted file mode 100644
index 8505a5f58f4e..000000000000
--- a/drivers/ide/ide-floppy.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IDE_FLOPPY_H
-#define __IDE_FLOPPY_H
-
-#include "ide-gd.h"
-
-#ifdef CONFIG_IDE_GD_ATAPI
-/*
- * Pages of the SELECT SENSE / MODE SENSE packet commands.
- * See SFF-8070i spec.
- */
-#define IDEFLOPPY_CAPABILITIES_PAGE 0x1b
-#define IDEFLOPPY_FLEXIBLE_DISK_PAGE 0x05
-
-/* IOCTLs used in low-level formatting. */
-#define IDEFLOPPY_IOCTL_FORMAT_SUPPORTED 0x4600
-#define IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY 0x4601
-#define IDEFLOPPY_IOCTL_FORMAT_START 0x4602
-#define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603
-
-/* ide-floppy.c */
-extern const struct ide_disk_ops ide_atapi_disk_ops;
-void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
-void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
-
-/* ide-floppy_ioctl.c */
-int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
- unsigned int, unsigned long);
-int ide_floppy_compat_ioctl(ide_drive_t *, struct block_device *, fmode_t,
- unsigned int, unsigned long);
-
-#ifdef CONFIG_IDE_PROC_FS
-/* ide-floppy_proc.c */
-extern ide_proc_entry_t ide_floppy_proc[];
-extern const struct ide_proc_devset ide_floppy_settings[];
-#endif
-#else
-#define ide_floppy_proc NULL
-#define ide_floppy_settings NULL
-#endif
-
-#endif /*__IDE_FLOPPY_H */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
deleted file mode 100644
index 39a790ac6cc3..000000000000
--- a/drivers/ide/ide-floppy_ioctl.c
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ide-floppy IOCTLs handling.
- */
-
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/compat.h>
-#include <linux/cdrom.h>
-#include <linux/mutex.h>
-
-#include <asm/unaligned.h>
-
-#include <scsi/scsi_ioctl.h>
-
-#include "ide-floppy.h"
-
-/*
- * Obtain the list of formattable capacities.
- * Very similar to ide_floppy_get_capacity, except that we push the capacity
- * descriptors to userland, instead of our own structures.
- *
- * Userland gives us the following structure:
- *
- * struct idefloppy_format_capacities {
- * int nformats;
- * struct {
- * int nblocks;
- * int blocksize;
- * } formats[];
- * };
- *
- * userland initializes nformats to the number of allocated formats[] records.
- * On exit we set nformats to the number of records we've actually initialized.
- */
-
-static DEFINE_MUTEX(ide_floppy_ioctl_mutex);
-static int ide_floppy_get_format_capacities(ide_drive_t *drive,
- struct ide_atapi_pc *pc,
- int __user *arg)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- int i, blocks, length, u_array_size, u_index;
- int __user *argp;
- u8 pc_buf[256], header_len, desc_cnt;
-
- if (get_user(u_array_size, arg))
- return -EFAULT;
-
- if (u_array_size <= 0)
- return -EINVAL;
-
- ide_floppy_create_read_capacity_cmd(pc);
-
- if (ide_queue_pc_tail(drive, floppy->disk, pc, pc_buf, pc->req_xfer)) {
- printk(KERN_ERR "ide-floppy: Can't get floppy parameters\n");
- return -EIO;
- }
-
- header_len = pc_buf[3];
- desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */
-
- u_index = 0;
- argp = arg + 1;
-
- /*
- * We always skip the first capacity descriptor. That's the current
- * capacity. We are interested in the remaining descriptors, the
- * formattable capacities.
- */
- for (i = 1; i < desc_cnt; i++) {
- unsigned int desc_start = 4 + i*8;
-
- if (u_index >= u_array_size)
- break; /* User-supplied buffer too small */
-
- blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]);
- length = be16_to_cpup((__be16 *)&pc_buf[desc_start + 6]);
-
- if (put_user(blocks, argp))
- return -EFAULT;
-
- ++argp;
-
- if (put_user(length, argp))
- return -EFAULT;
-
- ++argp;
-
- ++u_index;
- }
-
- if (put_user(u_index, arg))
- return -EFAULT;
-
- return 0;
-}
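
The comment above fully specifies the user-space side of IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: one flat array of ints, whose first element carries the number of allocated records on entry and the number of filled records on exit. A minimal caller might look like the sketch below; the device node /dev/hdd is illustrative only, and the ioctl number is the one defined in ide-floppy.h.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY	0x4601	/* from ide-floppy.h */

int main(void)
{
	int buf[1 + 2 * 8];	/* nformats + up to 8 {nblocks, blocksize} pairs */
	int fd = open("/dev/hdd", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	buf[0] = 8;		/* number of records the caller allocated */
	if (ioctl(fd, IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY, buf) == 0) {
		for (int i = 0; i < buf[0]; i++)
			printf("%d blocks of %d bytes\n",
			       buf[1 + 2 * i], buf[2 + 2 * i]);
	}

	close(fd);
	return 0;
}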
-
-static void ide_floppy_create_format_unit_cmd(struct ide_atapi_pc *pc,
- u8 *buf, int b, int l,
- int flags)
-{
- ide_init_pc(pc);
- pc->c[0] = GPCMD_FORMAT_UNIT;
- pc->c[1] = 0x17;
-
- memset(buf, 0, 12);
- buf[1] = 0xA2;
- /* Default format list header, u8 1: FOV/DCRT/IMM bits set */
-
- if (flags & 1) /* Verify bit on... */
- buf[1] ^= 0x20; /* ... turn off DCRT bit */
- buf[3] = 8;
-
- put_unaligned(cpu_to_be32(b), (unsigned int *)(&buf[4]));
- put_unaligned(cpu_to_be32(l), (unsigned int *)(&buf[8]));
- pc->req_xfer = 12;
- pc->flags |= PC_FLAG_WRITING;
-}
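
For concreteness, the 12-byte parameter list built above for a hypothetical format of 2880 blocks of 512 bytes with the verify flag clear comes out as:

	00 a2 00 08	/* header: FOV, DCRT and IMM set; one 8-byte descriptor follows */
	00 00 0b 40	/* number of blocks = 2880, big-endian */
	00 00 02 00	/* block length = 512, big-endian */

With the verify flag set, byte 1 becomes 0x82 instead, because the DCRT bit is toggled off.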
-
-static int ide_floppy_get_sfrp_bit(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u8 buf[20];
-
- drive->atapi_flags &= ~IDE_AFLAG_SRFP;
-
- ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_CAPABILITIES_PAGE);
- pc->flags |= PC_FLAG_SUPPRESS_ERROR;
-
- if (ide_queue_pc_tail(drive, floppy->disk, pc, buf, pc->req_xfer))
- return 1;
-
- if (buf[8 + 2] & 0x40)
- drive->atapi_flags |= IDE_AFLAG_SRFP;
-
- return 0;
-}
-
-static int ide_floppy_format_unit(ide_drive_t *drive, struct ide_atapi_pc *pc,
- int __user *arg)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u8 buf[12];
- int blocks, length, flags, err = 0;
-
- if (floppy->openers > 1) {
- /* Don't format if someone is using the disk */
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- return -EBUSY;
- }
-
- drive->dev_flags |= IDE_DFLAG_FORMAT_IN_PROGRESS;
-
- /*
- * Send ATAPI_FORMAT_UNIT to the drive.
- *
- * Userland gives us the following structure:
- *
- * struct idefloppy_format_command {
- * int nblocks;
- * int blocksize;
- * int flags;
- * } ;
- *
- * flags is a bitmask, currently, the only defined flag is:
- *
- * 0x01 - verify media after format.
- */
- if (get_user(blocks, arg) ||
- get_user(length, arg+1) ||
- get_user(flags, arg+2)) {
- err = -EFAULT;
- goto out;
- }
-
- ide_floppy_get_sfrp_bit(drive, pc);
- ide_floppy_create_format_unit_cmd(pc, buf, blocks, length, flags);
-
- if (ide_queue_pc_tail(drive, floppy->disk, pc, buf, pc->req_xfer))
- err = -EIO;
-
-out:
- if (err)
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- return err;
-}
-
-/*
- * Get ATAPI_FORMAT_UNIT progress indication.
- *
- * Userland gives a pointer to an int. The int is set to a progress
- * indicator 0-65536, with 65536=100%.
- *
- * If the drive does not support format progress indication, we just check
- * the dsc bit, and return either 0 or 65536.
- */
-
-static int ide_floppy_get_format_progress(ide_drive_t *drive,
- struct ide_atapi_pc *pc,
- int __user *arg)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- u8 sense_buf[18];
- int progress_indication = 0x10000;
-
- if (drive->atapi_flags & IDE_AFLAG_SRFP) {
- ide_create_request_sense_cmd(drive, pc);
- if (ide_queue_pc_tail(drive, floppy->disk, pc, sense_buf,
- pc->req_xfer))
- return -EIO;
-
- if (floppy->sense_key == 2 &&
- floppy->asc == 4 &&
- floppy->ascq == 4)
- progress_indication = floppy->progress_indication;
-
- /* Else assume format_unit has finished, and we're at 0x10000 */
- } else {
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
- u8 stat;
-
- local_irq_save(flags);
- stat = hwif->tp_ops->read_status(hwif);
- local_irq_restore(flags);
-
- progress_indication = ((stat & ATA_DSC) == 0) ? 0 : 0x10000;
- }
-
- if (put_user(progress_indication, arg))
- return -EFAULT;
-
- return 0;
-}
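
Put together with the structures documented above, the whole low-level format flow can be driven from user space roughly as in the sketch below. The 2880 x 512 geometry, the device node and the one-second polling interval are illustrative choices, not values required by the driver; FORMAT_START needs a writable open, and the ioctl numbers come from ide-floppy.h.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define IDEFLOPPY_IOCTL_FORMAT_START		0x4602	/* from ide-floppy.h */
#define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS	0x4603

int main(void)
{
	int cmd[3] = { 2880, 512, 0x01 };	/* nblocks, blocksize, verify */
	int progress = 0;
	int fd = open("/dev/hdd", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;

	if (ioctl(fd, IDEFLOPPY_IOCTL_FORMAT_START, cmd) < 0)
		return 1;

	do {
		sleep(1);
		if (ioctl(fd, IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS, &progress) < 0)
			break;
		printf("format: %d%%\n", progress * 100 / 65536);	/* 65536 == 100% */
	} while (progress < 65536);

	close(fd);
	return 0;
}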
-
-static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned long arg, unsigned int cmd)
-{
- struct ide_disk_obj *floppy = drive->driver_data;
- struct gendisk *disk = floppy->disk;
- int prevent = (arg && cmd != CDROMEJECT) ? 1 : 0;
-
- if (floppy->openers > 1)
- return -EBUSY;
-
- ide_set_media_lock(drive, disk, prevent);
-
- if (cmd == CDROMEJECT)
- ide_do_start_stop(drive, disk, 2);
-
- return 0;
-}
-
-static int ide_floppy_format_ioctl(ide_drive_t *drive, struct ide_atapi_pc *pc,
- fmode_t mode, unsigned int cmd,
- void __user *argp)
-{
- switch (cmd) {
- case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
- return 0;
- case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
- return ide_floppy_get_format_capacities(drive, pc, argp);
- case IDEFLOPPY_IOCTL_FORMAT_START:
- if (!(mode & FMODE_WRITE))
- return -EPERM;
- return ide_floppy_format_unit(drive, pc, (int __user *)argp);
- case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
- return ide_floppy_get_format_progress(drive, pc, argp);
- default:
- return -ENOTTY;
- }
-}
-
-int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- struct ide_atapi_pc pc;
- void __user *argp = (void __user *)arg;
- int err;
-
- mutex_lock(&ide_floppy_ioctl_mutex);
- if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
- err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
- goto out;
- }
-
- err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
- if (err != -ENOTTY)
- goto out;
-
- /*
- * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
- * and CDROM_SEND_PACKET (legacy) ioctls
- */
- if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
- if (err == -ENOTTY)
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
-
-out:
- mutex_unlock(&ide_floppy_ioctl_mutex);
- return err;
-}
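
Because CDROM_LOCKDOOR and CDROMEJECT are routed to ide_floppy_lockdoor() above, ejecting a Zip or LS-120 cartridge from user space needs nothing beyond the standard cdrom ioctls. A minimal sketch follows; the device node is illustrative, and both calls return -EBUSY while another opener holds the device.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	int fd = open("/dev/hdd", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	ioctl(fd, CDROM_LOCKDOOR, 0);	/* drop any door lock first */
	ioctl(fd, CDROMEJECT, 0);	/* START STOP UNIT with the eject bit */

	close(fd);
	return 0;
}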
-
-#ifdef CONFIG_COMPAT
-int ide_floppy_compat_ioctl(ide_drive_t *drive, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- struct ide_atapi_pc pc;
- void __user *argp = compat_ptr(arg);
- int err;
-
- mutex_lock(&ide_floppy_ioctl_mutex);
- if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
- err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
- goto out;
- }
-
- err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
- if (err != -ENOTTY)
- goto out;
-
- /*
- * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
- * and CDROM_SEND_PACKET (legacy) ioctls
- */
- if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
- if (err == -ENOTTY)
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
-
-out:
- mutex_unlock(&ide_floppy_ioctl_mutex);
- return err;
-}
-#endif
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
deleted file mode 100644
index 7f697ddb5fe5..000000000000
--- a/drivers/ide/ide-floppy_proc.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-#include <linux/seq_file.h>
-
-#include "ide-floppy.h"
-
-static int idefloppy_capacity_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t*drive = (ide_drive_t *)m->private;
-
- seq_printf(m, "%llu\n", (long long)ide_gd_capacity(drive));
- return 0;
-}
-
-ide_proc_entry_t ide_floppy_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, idefloppy_capacity_proc_show },
- { "geometry", S_IFREG|S_IRUGO, ide_geometry_proc_show },
- {}
-};
-
-ide_devset_rw_field(bios_cyl, bios_cyl);
-ide_devset_rw_field(bios_head, bios_head);
-ide_devset_rw_field(bios_sect, bios_sect);
-ide_devset_rw_field(ticks, pc_delay);
-
-const struct ide_proc_devset ide_floppy_settings[] = {
- IDE_PROC_DEVSET(bios_cyl, 0, 1023),
- IDE_PROC_DEVSET(bios_head, 0, 255),
- IDE_PROC_DEVSET(bios_sect, 0, 63),
- IDE_PROC_DEVSET(ticks, 0, 255),
- { NULL },
-};
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
deleted file mode 100644
index e2b6c82586ce..000000000000
--- a/drivers/ide/ide-gd.c
+++ /dev/null
@@ -1,432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/mutex.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
-
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
-#define IDE_DISK_MINORS (1 << PARTN_BITS)
-#else
-#define IDE_DISK_MINORS 0
-#endif
-
-#include "ide-disk.h"
-#include "ide-floppy.h"
-
-#define IDE_GD_VERSION "1.18"
-
-/* module parameters */
-static DEFINE_MUTEX(ide_gd_mutex);
-static unsigned long debug_mask;
-module_param(debug_mask, ulong, 0644);
-
-static DEFINE_MUTEX(ide_disk_ref_mutex);
-
-static void ide_disk_release(struct device *);
-
-static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
-{
- struct ide_disk_obj *idkp = NULL;
-
- mutex_lock(&ide_disk_ref_mutex);
- idkp = ide_drv_g(disk, ide_disk_obj);
- if (idkp) {
- if (ide_device_get(idkp->drive))
- idkp = NULL;
- else
- get_device(&idkp->dev);
- }
- mutex_unlock(&ide_disk_ref_mutex);
- return idkp;
-}
-
-static void ide_disk_put(struct ide_disk_obj *idkp)
-{
- ide_drive_t *drive = idkp->drive;
-
- mutex_lock(&ide_disk_ref_mutex);
- put_device(&idkp->dev);
- ide_device_put(drive);
- mutex_unlock(&ide_disk_ref_mutex);
-}
-
-sector_t ide_gd_capacity(ide_drive_t *drive)
-{
- return drive->capacity64;
-}
-
-static int ide_gd_probe(ide_drive_t *);
-
-static void ide_gd_remove(ide_drive_t *drive)
-{
- struct ide_disk_obj *idkp = drive->driver_data;
- struct gendisk *g = idkp->disk;
-
- ide_proc_unregister_driver(drive, idkp->driver);
- device_del(&idkp->dev);
- del_gendisk(g);
- drive->disk_ops->flush(drive);
-
- mutex_lock(&ide_disk_ref_mutex);
- put_device(&idkp->dev);
- mutex_unlock(&ide_disk_ref_mutex);
-}
-
-static void ide_disk_release(struct device *dev)
-{
- struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
- struct gendisk *g = idkp->disk;
-
- drive->disk_ops = NULL;
- drive->driver_data = NULL;
- g->private_data = NULL;
- put_disk(g);
- kfree(idkp);
-}
-
-/*
- * On HPA drives the capacity needs to be
- * reinitialized on resume otherwise the disk
- * can not be used and a hard reset is required
- */
-static void ide_gd_resume(ide_drive_t *drive)
-{
- if (ata_id_hpa_enabled(drive->id))
- (void)drive->disk_ops->get_capacity(drive);
-}
-
-static const struct dmi_system_id ide_coldreboot_table[] = {
- {
- /* Acer TravelMate 66x cuts power during reboot */
- .ident = "Acer TravelMate 660",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"),
- },
- },
-
- { } /* terminate list */
-};
-
-static void ide_gd_shutdown(ide_drive_t *drive)
-{
-#ifdef CONFIG_ALPHA
- /* On Alpha, halt(8) doesn't actually turn the machine off,
- it puts you into a sort of firmware monitor. Typically,
- it's used to boot another kernel image, so it's not much
- different from reboot(8). Therefore, we don't need to
- spin down the disk in this case, especially since Alpha
- firmware doesn't handle disks in standby mode properly.
- On the other hand, it's reasonably safe to turn the power
- off when the shutdown process reaches the firmware prompt,
- as the firmware initialization takes a rather long time -
- at least 10 seconds, which should be sufficient for
- the disk to expire its write cache. */
- if (system_state != SYSTEM_POWER_OFF) {
-#else
- if (system_state == SYSTEM_RESTART &&
- !dmi_check_system(ide_coldreboot_table)) {
-#endif
- drive->disk_ops->flush(drive);
- return;
- }
-
- printk(KERN_INFO "Shutdown: %s\n", drive->name);
-
- drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
-}
-
-#ifdef CONFIG_IDE_PROC_FS
-static ide_proc_entry_t *ide_disk_proc_entries(ide_drive_t *drive)
-{
- return (drive->media == ide_disk) ? ide_disk_proc : ide_floppy_proc;
-}
-
-static const struct ide_proc_devset *ide_disk_proc_devsets(ide_drive_t *drive)
-{
- return (drive->media == ide_disk) ? ide_disk_settings
- : ide_floppy_settings;
-}
-#endif
-
-static ide_startstop_t ide_gd_do_request(ide_drive_t *drive,
- struct request *rq, sector_t sector)
-{
- return drive->disk_ops->do_request(drive, rq, sector);
-}
-
-static struct ide_driver ide_gd_driver = {
- .gen_driver = {
- .owner = THIS_MODULE,
- .name = "ide-gd",
- .bus = &ide_bus_type,
- },
- .probe = ide_gd_probe,
- .remove = ide_gd_remove,
- .resume = ide_gd_resume,
- .shutdown = ide_gd_shutdown,
- .version = IDE_GD_VERSION,
- .do_request = ide_gd_do_request,
-#ifdef CONFIG_IDE_PROC_FS
- .proc_entries = ide_disk_proc_entries,
- .proc_devsets = ide_disk_proc_devsets,
-#endif
-};
-
-static int ide_gd_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- struct ide_disk_obj *idkp;
- ide_drive_t *drive;
- int ret = 0;
-
- idkp = ide_disk_get(disk);
- if (idkp == NULL)
- return -ENXIO;
-
- drive = idkp->drive;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- idkp->openers++;
-
- if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- /* Just in case */
-
- ret = drive->disk_ops->init_media(drive, disk);
-
- /*
- * Allow O_NDELAY to open a drive without a disk, or with an
- * unreadable disk, so that we can get the format capacity
- * of the drive or begin the format - Sam
- */
- if (ret && (mode & FMODE_NDELAY) == 0) {
- ret = -EIO;
- goto out_put_idkp;
- }
-
- if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) {
- ret = -EROFS;
- goto out_put_idkp;
- }
-
- /*
- * Ignore the return code from door_lock,
- * since the open() has already succeeded,
- * and the door_lock is irrelevant at this point.
- */
- drive->disk_ops->set_doorlock(drive, disk, 1);
- if (__invalidate_device(bdev, true))
- pr_warn("VFS: busy inodes on changed media %s\n",
- bdev->bd_disk->disk_name);
- drive->disk_ops->get_capacity(drive);
- set_capacity(disk, ide_gd_capacity(drive));
- set_bit(GD_NEED_PART_SCAN, &disk->state);
- } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
- ret = -EBUSY;
- goto out_put_idkp;
- }
- return 0;
-
-out_put_idkp:
- idkp->openers--;
- ide_disk_put(idkp);
- return ret;
-}
-
-static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&ide_gd_mutex);
- ret = ide_gd_open(bdev, mode);
- mutex_unlock(&ide_gd_mutex);
-
- return ret;
-}
-
-
-static void ide_gd_release(struct gendisk *disk, fmode_t mode)
-{
- struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- mutex_lock(&ide_gd_mutex);
- if (idkp->openers == 1)
- drive->disk_ops->flush(drive);
-
- if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
- drive->disk_ops->set_doorlock(drive, disk, 0);
- drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
- }
-
- idkp->openers--;
-
- ide_disk_put(idkp);
- mutex_unlock(&ide_gd_mutex);
-}
-
-static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- geo->heads = drive->bios_head;
- geo->sectors = drive->bios_sect;
- geo->cylinders = (u16)drive->bios_cyl; /* truncate */
- return 0;
-}
-
-static void ide_gd_unlock_native_capacity(struct gendisk *disk)
-{
- struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
- const struct ide_disk_ops *disk_ops = drive->disk_ops;
-
- if (disk_ops->unlock_native_capacity)
- disk_ops->unlock_native_capacity(drive);
-}
-
-static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
-}
-
-#ifdef CONFIG_COMPAT
-static int ide_gd_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- if (!drive->disk_ops->compat_ioctl)
- return -ENOIOCTLCMD;
-
- return drive->disk_ops->compat_ioctl(drive, bdev, mode, cmd, arg);
-}
-#endif
-
-static const struct block_device_operations ide_gd_ops = {
- .owner = THIS_MODULE,
- .open = ide_gd_unlocked_open,
- .release = ide_gd_release,
- .ioctl = ide_gd_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ide_gd_compat_ioctl,
-#endif
- .getgeo = ide_gd_getgeo,
- .unlock_native_capacity = ide_gd_unlock_native_capacity,
-};
-
-static int ide_gd_probe(ide_drive_t *drive)
-{
- const struct ide_disk_ops *disk_ops = NULL;
- struct ide_disk_obj *idkp;
- struct gendisk *g;
-
- /* strstr("foo", "") is non-NULL */
- if (!strstr("ide-gd", drive->driver_req))
- goto failed;
-
-#ifdef CONFIG_IDE_GD_ATA
- if (drive->media == ide_disk)
- disk_ops = &ide_ata_disk_ops;
-#endif
-#ifdef CONFIG_IDE_GD_ATAPI
- if (drive->media == ide_floppy)
- disk_ops = &ide_atapi_disk_ops;
-#endif
- if (disk_ops == NULL)
- goto failed;
-
- if (disk_ops->check(drive, DRV_NAME) == 0) {
- printk(KERN_ERR PFX "%s: not supported by this driver\n",
- drive->name);
- goto failed;
- }
-
- idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
- if (!idkp) {
- printk(KERN_ERR PFX "%s: can't allocate a disk structure\n",
- drive->name);
- goto failed;
- }
-
- g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
- if (!g)
- goto out_free_idkp;
-
- ide_init_disk(g, drive);
-
- idkp->dev.parent = &drive->gendev;
- idkp->dev.release = ide_disk_release;
- dev_set_name(&idkp->dev, "%s", dev_name(&drive->gendev));
-
- if (device_register(&idkp->dev))
- goto out_free_disk;
-
- idkp->drive = drive;
- idkp->driver = &ide_gd_driver;
- idkp->disk = g;
-
- g->private_data = &idkp->driver;
-
- drive->driver_data = idkp;
- drive->debug_mask = debug_mask;
- drive->disk_ops = disk_ops;
-
- disk_ops->setup(drive);
-
- set_capacity(g, ide_gd_capacity(drive));
-
- g->minors = IDE_DISK_MINORS;
- g->flags |= GENHD_FL_EXT_DEVT;
- if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
- g->flags = GENHD_FL_REMOVABLE;
- g->fops = &ide_gd_ops;
- g->events = DISK_EVENT_MEDIA_CHANGE;
- device_add_disk(&drive->gendev, g, NULL);
- return 0;
-
-out_free_disk:
- put_disk(g);
-out_free_idkp:
- kfree(idkp);
-failed:
- return -ENODEV;
-}
-
-static int __init ide_gd_init(void)
-{
- printk(KERN_INFO DRV_NAME " driver " IDE_GD_VERSION "\n");
- return driver_register(&ide_gd_driver.gen_driver);
-}
-
-static void __exit ide_gd_exit(void)
-{
- driver_unregister(&ide_gd_driver.gen_driver);
-}
-
-MODULE_ALIAS("ide:*m-disk*");
-MODULE_ALIAS("ide-disk");
-MODULE_ALIAS("ide:*m-floppy*");
-MODULE_ALIAS("ide-floppy");
-module_init(ide_gd_init);
-module_exit(ide_gd_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("generic ATA/ATAPI disk driver");
diff --git a/drivers/ide/ide-gd.h b/drivers/ide/ide-gd.h
deleted file mode 100644
index af3fe1880e9e..000000000000
--- a/drivers/ide/ide-gd.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IDE_GD_H
-#define __IDE_GD_H
-
-#define DRV_NAME "ide-gd"
-#define PFX DRV_NAME ": "
-
-/* define to see debug info */
-#define IDE_GD_DEBUG_LOG 0
-
-#if IDE_GD_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
-struct ide_disk_obj {
- ide_drive_t *drive;
- struct ide_driver *driver;
- struct gendisk *disk;
- struct device dev;
- unsigned int openers; /* protected by BKL for now */
-
- /* used for blk_{fs,pc}_request() requests */
- struct ide_atapi_pc queued_pc;
-
- /* Last error information */
- u8 sense_key, asc, ascq;
-
- int progress_indication;
-
- /* Device information */
- /* Current format */
- int blocks, block_size, bs_factor;
- /* Last format capacity descriptor */
- u8 cap_desc[8];
- /* Copy of the flexible disk page */
- u8 flexible_disk_page[32];
-};
-
-sector_t ide_gd_capacity(ide_drive_t *);
-
-#endif /* __IDE_GD_H */
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
deleted file mode 100644
index 80c0d69b83ac..000000000000
--- a/drivers/ide/ide-generic.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * generic/default IDE host driver
- *
- * Copyright (C) 2004, 2008-2009 Bartlomiej Zolnierkiewicz
- * This code was split off from ide.c. See it for original copyrights.
- *
- * May be copied or modified under the terms of the GNU General Public License.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-#include <linux/pci_ids.h>
-
-/* FIXME: convert arm to use ide_platform host driver */
-#ifdef CONFIG_ARM
-#include <asm/irq.h>
-#endif
-
-#define DRV_NAME "ide_generic"
-
-static int probe_mask;
-module_param(probe_mask, int, 0);
-MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
-
-static const struct ide_port_info ide_generic_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-#ifdef CONFIG_ARM
-static const u16 legacy_bases[] = { 0x1f0 };
-static const int legacy_irqs[] = { IRQ_HARDDISK };
-#elif defined(CONFIG_ALPHA)
-static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };
-static const int legacy_irqs[] = { 14, 15, 11, 10 };
-#else
-static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
-static const int legacy_irqs[] = { 14, 15, 11, 10, 8, 12 };
-#endif
-
-static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
-{
-#ifdef CONFIG_PCI
- struct pci_dev *p = NULL;
- u16 val;
-
- for_each_pci_dev(p) {
- if (pci_resource_start(p, 0) == 0x1f0)
- *primary = 1;
- if (pci_resource_start(p, 2) == 0x170)
- *secondary = 1;
-
- /* Cyrix CS55{1,2}0 pre SFF MWDMA ATA on the bridge */
- if (p->vendor == PCI_VENDOR_ID_CYRIX &&
- (p->device == PCI_DEVICE_ID_CYRIX_5510 ||
- p->device == PCI_DEVICE_ID_CYRIX_5520))
- *primary = *secondary = 1;
-
- /* Intel MPIIX - PIO ATA on non PCI side of bridge */
- if (p->vendor == PCI_VENDOR_ID_INTEL &&
- p->device == PCI_DEVICE_ID_INTEL_82371MX) {
- pci_read_config_word(p, 0x6C, &val);
- if (val & 0x8000) {
- /* ATA port enabled */
- if (val & 0x4000)
- *secondary = 1;
- else
- *primary = 1;
- }
- }
- }
-#endif
-}
-
-static int __init ide_generic_init(void)
-{
- struct ide_hw hw, *hws[] = { &hw };
- unsigned long io_addr;
- int i, rc = 0, primary = 0, secondary = 0;
-
- ide_generic_check_pci_legacy_iobases(&primary, &secondary);
-
- if (!probe_mask) {
- printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" "
- "module parameter for probing all legacy ISA IDE ports\n");
-
- if (primary == 0)
- probe_mask |= 0x1;
-
- if (secondary == 0)
- probe_mask |= 0x2;
- } else
- printk(KERN_INFO DRV_NAME ": enforcing probing of I/O ports "
- "upon user request\n");
-
- for (i = 0; i < ARRAY_SIZE(legacy_bases); i++) {
- io_addr = legacy_bases[i];
-
- if ((probe_mask & (1 << i)) && io_addr) {
- if (!request_region(io_addr, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
- "not free.\n",
- DRV_NAME, io_addr, io_addr + 7);
- rc = -EBUSY;
- continue;
- }
-
- if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX "
- "not free.\n",
- DRV_NAME, io_addr + 0x206);
- release_region(io_addr, 8);
- rc = -EBUSY;
- continue;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
-#ifdef CONFIG_IA64
- hw.irq = isa_irq_to_vector(legacy_irqs[i]);
-#else
- hw.irq = legacy_irqs[i];
-#endif
- rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL);
- if (rc) {
- release_region(io_addr + 0x206, 1);
- release_region(io_addr, 8);
- }
- }
- }
-
- return rc;
-}
-
-module_init(ide_generic_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
deleted file mode 100644
index 94bdcf1ea186..000000000000
--- a/drivers/ide/ide-io-std.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-
-#if defined(CONFIG_ARM) || defined(CONFIG_M68K) || defined(CONFIG_MIPS) || \
- defined(CONFIG_PARISC) || defined(CONFIG_PPC) || defined(CONFIG_SPARC)
-#include <asm/ide.h>
-#else
-#include <asm-generic/ide_iops.h>
-#endif
-
-/*
- * Conventional PIO operations for ATA devices
- */
-
-static u8 ide_inb(unsigned long port)
-{
- return (u8) inb(port);
-}
-
-static void ide_outb(u8 val, unsigned long port)
-{
- outb(val, port);
-}
-
-/*
- * MMIO operations, typically used for SATA controllers
- */
-
-static u8 ide_mm_inb(unsigned long port)
-{
- return (u8) readb((void __iomem *) port);
-}
-
-static void ide_mm_outb(u8 value, unsigned long port)
-{
- writeb(value, (void __iomem *) port);
-}
-
-void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
- else
- outb(cmd, hwif->io_ports.command_addr);
-}
-EXPORT_SYMBOL_GPL(ide_exec_command);
-
-u8 ide_read_status(ide_hwif_t *hwif)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)hwif->io_ports.status_addr);
- else
- return inb(hwif->io_ports.status_addr);
-}
-EXPORT_SYMBOL_GPL(ide_read_status);
-
-u8 ide_read_altstatus(ide_hwif_t *hwif)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)hwif->io_ports.ctl_addr);
- else
- return inb(hwif->io_ports.ctl_addr);
-}
-EXPORT_SYMBOL_GPL(ide_read_altstatus);
-
-void ide_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
- else
- outb(ctl, hwif->io_ports.ctl_addr);
-}
-EXPORT_SYMBOL_GPL(ide_write_devctl);
-
-void ide_dev_select(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 select = drive->select | ATA_DEVICE_OBS;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(select, (void __iomem *)hwif->io_ports.device_addr);
- else
- outb(select, hwif->io_ports.device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_dev_select);
-
-void ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- void (*tf_outb)(u8 addr, unsigned long port);
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (mmio)
- tf_outb = ide_mm_outb;
- else
- tf_outb = ide_outb;
-
- if (valid & IDE_VALID_FEATURE)
- tf_outb(tf->feature, io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf_outb(tf->nsect, io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf_outb(tf->lbal, io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf_outb(tf->lbam, io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf_outb(tf->lbah, io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf_outb(tf->device, io_ports->device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_tf_load);
-
-void ide_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- u8 (*tf_inb)(unsigned long port);
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (mmio)
- tf_inb = ide_mm_inb;
- else
- tf_inb = ide_inb;
-
- if (valid & IDE_VALID_ERROR)
- tf->error = tf_inb(io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf->nsect = tf_inb(io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf->lbal = tf_inb(io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf->lbam = tf_inb(io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf->lbah = tf_inb(io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf->device = tf_inb(io_ports->device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_tf_read);
-
-/*
- * Some localbus EIDE interfaces require a special access sequence
- * when using 32-bit I/O instructions to transfer data. We call this
- * the "vlb_sync" sequence, which consists of three successive reads
- * of the sector count register location, with interrupts disabled
- * to ensure that the reads all happen together.
- */
-static void ata_vlb_sync(unsigned long port)
-{
- (void)inb(port);
- (void)inb(port);
- (void)inb(port);
-}
-
-/*
- * This is used for most PIO data transfers *from* the IDE interface
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd len is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
-void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long data_addr = io_ports->data_addr;
- unsigned int words = (len + 1) >> 1;
- u8 io_32bit = drive->io_32bit;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (io_32bit) {
- unsigned long flags;
-
- if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- words >>= 1;
- if (mmio)
- __ide_mm_insl((void __iomem *)data_addr, buf, words);
- else
- insl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
-
- if (((len + 1) & 3) < 2)
- return;
-
- buf += len & ~3;
- words = 1;
- }
-
- if (mmio)
- __ide_mm_insw((void __iomem *)data_addr, buf, words);
- else
- insw(data_addr, buf, words);
-}
-EXPORT_SYMBOL_GPL(ide_input_data);
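
A worked example of the rounding described in the comment above, assuming 32-bit PIO (io_32bit set) on a port-I/O interface:

	len = 510: words = 255, so 127 insl transfers move 508 bytes and one
	           trailing insw moves the remaining 2 bytes (510 in total);
	len = 511: words = 256, so 128 insl transfers move 512 bytes and the
	           request is rounded up by one byte, which is why callers must
	           allocate the extra byte the comment asks for.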
-
-/*
- * This is used for most PIO data transfers *to* the IDE interface
- */
-void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long data_addr = io_ports->data_addr;
- unsigned int words = (len + 1) >> 1;
- u8 io_32bit = drive->io_32bit;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (io_32bit) {
- unsigned long flags;
-
- if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- words >>= 1;
- if (mmio)
- __ide_mm_outsl((void __iomem *)data_addr, buf, words);
- else
- outsl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
-
- if (((len + 1) & 3) < 2)
- return;
-
- buf += len & ~3;
- words = 1;
- }
-
- if (mmio)
- __ide_mm_outsw((void __iomem *)data_addr, buf, words);
- else
- outsw(data_addr, buf, words);
-}
-EXPORT_SYMBOL_GPL(ide_output_data);
-
-const struct ide_tp_ops default_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
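
Every accessor above is exported, so a host driver that only needs to replace the data-transfer hooks can reuse the remaining defaults. The sketch below is hypothetical (my_input_data, my_output_data and the surrounding driver are invented); only the ide_tp_ops field names and the exported helpers are taken from the code above.

#include <linux/ide.h>

/* Controller-specific PIO transfer routines; bodies elided. */
static void my_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
			  void *buf, unsigned int len)
{
	/* read 'len' bytes from the controller's data register into 'buf' */
}

static void my_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
			   void *buf, unsigned int len)
{
	/* write 'len' bytes from 'buf' to the controller's data register */
}

static const struct ide_tp_ops my_tp_ops = {
	.exec_command	= ide_exec_command,
	.read_status	= ide_read_status,
	.read_altstatus	= ide_read_altstatus,
	.write_devctl	= ide_write_devctl,
	.dev_select	= ide_dev_select,
	.tf_load	= ide_tf_load,
	.tf_read	= ide_tf_read,
	.input_data	= my_input_data,
	.output_data	= my_output_data,
};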
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
deleted file mode 100644
index 4867b67b60d6..000000000000
--- a/drivers/ide/ide-io.c
+++ /dev/null
@@ -1,904 +0,0 @@
-/*
- * IDE I/O functions
- *
- * Basic PIO and command management functionality.
- *
- * This code was split off from ide.c. See ide.c for history and original
- * copyrights.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/blkpg.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/completion.h>
-#include <linux/reboot.h>
-#include <linux/cdrom.h>
-#include <linux/seq_file.h>
-#include <linux/device.h>
-#include <linux/kmod.h>
-#include <linux/scatterlist.h>
-#include <linux/bitops.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
- unsigned int nr_bytes)
-{
- /*
- * decide whether to reenable DMA -- 3 is a random magic for now,
- * if we DMA timeout more than 3 times, just stay in PIO
- */
- if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
- drive->retry_pio <= 3) {
- drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
- ide_dma_on(drive);
- }
-
- if (!blk_update_request(rq, error, nr_bytes)) {
- if (rq == drive->sense_rq) {
- drive->sense_rq = NULL;
- drive->sense_rq_active = false;
- }
-
- __blk_mq_end_request(rq, error);
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(ide_end_rq);
-
-void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
-{
- const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
- struct ide_taskfile *tf = &cmd->tf;
- struct request *rq = cmd->rq;
- u8 tf_cmd = tf->command;
-
- tf->error = err;
- tf->status = stat;
-
- if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
- u8 data[2];
-
- tp_ops->input_data(drive, cmd, data, 2);
-
- cmd->tf.data = data[0];
- cmd->hob.data = data[1];
- }
-
- ide_tf_readback(drive, cmd);
-
- if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
- tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
- if (tf->lbal != 0xc4) {
- printk(KERN_ERR "%s: head unload failed!\n",
- drive->name);
- ide_tf_dump(drive->name, cmd);
- } else
- drive->dev_flags |= IDE_DFLAG_PARKED;
- }
-
- if (rq && ata_taskfile_request(rq)) {
- struct ide_cmd *orig_cmd = ide_req(rq)->special;
-
- if (cmd->tf_flags & IDE_TFLAG_DYN)
- kfree(orig_cmd);
- else if (cmd != orig_cmd)
- memcpy(orig_cmd, cmd, sizeof(*cmd));
- }
-}
-
-int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- int rc;
-
- /*
- * if failfast is set on a request, override number of sectors
- * and complete the whole request right now
- */
- if (blk_noretry_request(rq) && error)
- nr_bytes = blk_rq_sectors(rq) << 9;
-
- rc = ide_end_rq(drive, rq, error, nr_bytes);
- if (rc == 0)
- hwif->rq = NULL;
-
- return rc;
-}
-EXPORT_SYMBOL(ide_complete_rq);
-
-void ide_kill_rq(ide_drive_t *drive, struct request *rq)
-{
- u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
- u8 media = drive->media;
-
- drive->failed_pc = NULL;
-
- if ((media == ide_floppy || media == ide_tape) && drv_req) {
- scsi_req(rq)->result = 0;
- } else {
- if (media == ide_tape)
- scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
- else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
- scsi_req(rq)->result = -EIO;
- }
-
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
-}
-
-static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
-{
- tf->nsect = drive->sect;
- tf->lbal = drive->sect;
- tf->lbam = drive->cyl;
- tf->lbah = drive->cyl >> 8;
- tf->device = (drive->head - 1) | drive->select;
- tf->command = ATA_CMD_INIT_DEV_PARAMS;
-}
-
-static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
-{
- tf->nsect = drive->sect;
- tf->command = ATA_CMD_RESTORE;
-}
-
-static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
-{
- tf->nsect = drive->mult_req;
- tf->command = ATA_CMD_SET_MULTI;
-}
-
-/**
- * do_special - issue some special commands
- * @drive: drive the command is for
- *
- * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
- * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
- */
-
-static ide_startstop_t do_special(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
- drive->special_flags);
-#endif
- if (drive->media != ide_disk) {
- drive->special_flags = 0;
- drive->mult_req = 0;
- return ide_stopped;
- }
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.protocol = ATA_PROT_NODATA;
-
- if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
- drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
- ide_tf_set_specify_cmd(drive, &cmd.tf);
- } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
- drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
- ide_tf_set_restore_cmd(drive, &cmd.tf);
- } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
- drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
- ide_tf_set_setmult_cmd(drive, &cmd.tf);
- } else
- BUG();
-
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;
-
- do_rw_taskfile(drive, &cmd);
-
- return ide_started;
-}
-
-void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
- struct request *rq = cmd->rq;
-
- cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
- if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
- last_sg->length +=
- (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
-}
-EXPORT_SYMBOL_GPL(ide_map_sg);
-
-void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
-{
- cmd->nbytes = cmd->nleft = nr_bytes;
- cmd->cursg_ofs = 0;
- cmd->cursg = NULL;
-}
-EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
-
-/**
- * execute_drive_command - issue special drive command
- * @drive: the drive to issue the command on
- * @rq: the request structure holding the command
- *
- * execute_drive_cmd() issues a special drive command, usually
- * initiated by ioctl() from the external hdparm program. The
- * command can be a drive command, drive task or taskfile
- * operation. Weirdly you can call it with NULL to wait for
- * all commands to finish. Don't do this as that is due to change
- */
-
-static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
- struct request *rq)
-{
- struct ide_cmd *cmd = ide_req(rq)->special;
-
- if (cmd) {
- if (cmd->protocol == ATA_PROT_PIO) {
- ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
- ide_map_sg(drive, cmd);
- }
-
- return do_rw_taskfile(drive, cmd);
- }
-
- /*
- * NULL is actually a valid way of waiting for
- * all current requests to be flushed from the queue.
- */
-#ifdef DEBUG
- printk("%s: DRIVE_CMD (null)\n", drive->name);
-#endif
- scsi_req(rq)->result = 0;
- ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
-
- return ide_stopped;
-}
-
-static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
-{
- u8 cmd = scsi_req(rq)->cmd[0];
-
- switch (cmd) {
- case REQ_PARK_HEADS:
- case REQ_UNPARK_HEADS:
- return ide_do_park_unpark(drive, rq);
- case REQ_DEVSET_EXEC:
- return ide_do_devset(drive, rq);
- case REQ_DRIVE_RESET:
- return ide_do_reset(drive);
- default:
- BUG();
- }
-}
-
-/**
- * start_request - start of I/O and command issuing for IDE
- *
- * start_request() initiates handling of a new I/O request. It
- * accepts commands and I/O (read/write) requests.
- *
- * FIXME: this function needs a rename
- */
-
-static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
-{
- ide_startstop_t startstop;
-
-#ifdef DEBUG
- printk("%s: start_request: current=0x%08lx\n",
- drive->hwif->name, (unsigned long) rq);
-#endif
-
- /* bail early if we've exceeded max_failures */
- if (drive->max_failures && (drive->failures > drive->max_failures)) {
- rq->rq_flags |= RQF_FAILED;
- goto kill_rq;
- }
-
- if (drive->prep_rq && !drive->prep_rq(drive, rq))
- return ide_stopped;
-
- if (ata_pm_request(rq))
- ide_check_pm_state(drive, rq);
-
- drive->hwif->tp_ops->dev_select(drive);
- if (ide_wait_stat(&startstop, drive, drive->ready_stat,
- ATA_BUSY | ATA_DRQ, WAIT_READY)) {
- printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
- return startstop;
- }
-
- if (drive->special_flags == 0) {
- struct ide_driver *drv;
-
- /*
- * We reset the drive so we need to issue a SETFEATURES.
- * Do it _after_ do_special() restored device parameters.
- */
- if (drive->current_speed == 0xff)
- ide_config_drive_speed(drive, drive->desired_speed);
-
- if (ata_taskfile_request(rq))
- return execute_drive_cmd(drive, rq);
- else if (ata_pm_request(rq)) {
- struct ide_pm_state *pm = ide_req(rq)->special;
-#ifdef DEBUG_PM
- printk("%s: start_power_step(step: %d)\n",
- drive->name, pm->pm_step);
-#endif
- startstop = ide_start_power_step(drive, rq);
- if (startstop == ide_stopped &&
- pm->pm_step == IDE_PM_COMPLETED)
- ide_complete_pm_rq(drive, rq);
- return startstop;
- } else if (!rq->rq_disk && ata_misc_request(rq))
- /*
- * TODO: Once all ULDs have been modified to
- * check for specific op codes rather than
- * blindly accepting any special request, the
- * check for ->rq_disk above may be replaced
- * by a more suitable mechanism or even
- * dropped entirely.
- */
- return ide_special_rq(drive, rq);
-
- drv = *(struct ide_driver **)rq->rq_disk->private_data;
-
- return drv->do_request(drive, rq, blk_rq_pos(rq));
- }
- return do_special(drive);
-kill_rq:
- ide_kill_rq(drive, rq);
- return ide_stopped;
-}
-
-/**
- * ide_stall_queue - pause an IDE device
- * @drive: drive to stall
- * @timeout: time to stall for (jiffies)
- *
- * ide_stall_queue() can be used by a drive to give excess bandwidth back
- * to the port by sleeping for timeout jiffies.
- */
-
-void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
-{
- if (timeout > WAIT_WORSTCASE)
- timeout = WAIT_WORSTCASE;
- drive->sleep = timeout + jiffies;
- drive->dev_flags |= IDE_DFLAG_SLEEPING;
-}
-EXPORT_SYMBOL(ide_stall_queue);
-
-static inline int ide_lock_port(ide_hwif_t *hwif)
-{
- if (hwif->busy)
- return 1;
-
- hwif->busy = 1;
-
- return 0;
-}
-
-static inline void ide_unlock_port(ide_hwif_t *hwif)
-{
- hwif->busy = 0;
-}
-
-static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
-{
- int rc = 0;
-
- if (host->host_flags & IDE_HFLAG_SERIALIZE) {
- rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
- if (rc == 0) {
- if (host->get_lock)
- host->get_lock(ide_intr, hwif);
- }
- }
- return rc;
-}
-
-static inline void ide_unlock_host(struct ide_host *host)
-{
- if (host->host_flags & IDE_HFLAG_SERIALIZE) {
- if (host->release_lock)
- host->release_lock();
- clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
- }
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
- struct request_queue *q = drive->queue;
-
- /* Use 3ms as that was the old plug delay */
- if (rq) {
- blk_mq_requeue_request(rq, false);
- blk_mq_delay_kick_requeue_list(q, 3);
- } else
- blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
-}
-
-blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
- bool local_requeue)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_host *host = hwif->host;
- ide_startstop_t startstop;
-
- if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
- rq->rq_flags |= RQF_DONTPREP;
- ide_req(rq)->special = NULL;
- }
-
- /* HLD do_request() callback might sleep, make sure it's okay */
- might_sleep();
-
- if (ide_lock_host(host, hwif))
- return BLK_STS_DEV_RESOURCE;
-
- spin_lock_irq(&hwif->lock);
-
- if (!ide_lock_port(hwif)) {
- ide_hwif_t *prev_port;
-
- WARN_ON_ONCE(hwif->rq);
-repeat:
- prev_port = hwif->host->cur_port;
- if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
- time_after(drive->sleep, jiffies)) {
- ide_unlock_port(hwif);
- goto plug_device;
- }
-
- if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
- hwif != prev_port) {
- ide_drive_t *cur_dev =
- prev_port ? prev_port->cur_dev : NULL;
-
- /*
- * set nIEN for previous port, drives in the
- * quirk list may not like intr setups/cleanups
- */
- if (cur_dev &&
- (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
- prev_port->tp_ops->write_devctl(prev_port,
- ATA_NIEN |
- ATA_DEVCTL_OBS);
-
- hwif->host->cur_port = hwif;
- }
- hwif->cur_dev = drive;
- drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
-
- /*
- * Sanity: don't accept a request that isn't a PM request
- * if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the blk_fetch_request()
- * above from returning whatever is in the queue. Since we call
- * ide_do_request() ourselves, we end up taking requests while
- * the queue is blocked...
- */
- if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
- ata_pm_request(rq) == 0 &&
- (rq->rq_flags & RQF_PM) == 0) {
- /* there should be no pending command at this point */
- ide_unlock_port(hwif);
- goto plug_device;
- }
-
- scsi_req(rq)->resid_len = blk_rq_bytes(rq);
- hwif->rq = rq;
-
- spin_unlock_irq(&hwif->lock);
- startstop = start_request(drive, rq);
- spin_lock_irq(&hwif->lock);
-
- if (startstop == ide_stopped) {
- rq = hwif->rq;
- hwif->rq = NULL;
- if (rq)
- goto repeat;
- ide_unlock_port(hwif);
- goto out;
- }
- } else {
-plug_device:
- if (local_requeue)
- list_add(&rq->queuelist, &drive->rq_list);
- spin_unlock_irq(&hwif->lock);
- ide_unlock_host(host);
- if (!local_requeue)
- ide_requeue_and_plug(drive, rq);
- return BLK_STS_OK;
- }
-
-out:
- spin_unlock_irq(&hwif->lock);
- if (rq == NULL)
- ide_unlock_host(host);
- return BLK_STS_OK;
-}
-
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- ide_drive_t *drive = hctx->queue->queuedata;
- ide_hwif_t *hwif = drive->hwif;
-
- spin_lock_irq(&hwif->lock);
- if (drive->sense_rq_active) {
- spin_unlock_irq(&hwif->lock);
- return BLK_STS_DEV_RESOURCE;
- }
- spin_unlock_irq(&hwif->lock);
-
- blk_mq_start_request(bd->rq);
- return ide_issue_rq(drive, bd->rq, false);
-}
-
-static int drive_is_ready(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 stat = 0;
-
- if (drive->waiting_for_dma)
- return hwif->dma_ops->dma_test_irq(drive);
-
- if (hwif->io_ports.ctl_addr &&
- (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
- stat = hwif->tp_ops->read_altstatus(hwif);
- else
- /* Note: this may clear a pending IRQ!! */
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY)
- /* drive busy: definitely not interrupting */
- return 0;
-
- /* drive ready: *might* be interrupting */
- return 1;
-}
-
-/**
- * ide_timer_expiry - handle lack of an IDE interrupt
- * @data: timer callback magic (hwif)
- *
- * An IDE command has timed out before the expected drive return
- * occurred. At this point we attempt to clean up the current
- * mess. If the current handler includes an expiry handler then
- * we invoke the expiry handler, and providing it is happy the
- * work is done. If that fails we apply generic recovery rules
- * invoking the handler and checking the drive DMA status. We
- * have an excessively incestuous relationship with the DMA
- * logic that wants cleaning up.
- */
-
-void ide_timer_expiry (struct timer_list *t)
-{
- ide_hwif_t *hwif = from_timer(hwif, t, timer);
- ide_drive_t *drive;
- ide_handler_t *handler;
- unsigned long flags;
- int wait = -1;
- int plug_device = 0;
- struct request *rq_in_flight;
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- handler = hwif->handler;
-
- if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
- /*
- * Either a marginal timeout occurred
- * (got the interrupt just as timer expired),
- * or we were "sleeping" to give other devices a chance.
- * Either way, we don't really want to complain about anything.
- */
- } else {
- ide_expiry_t *expiry = hwif->expiry;
- ide_startstop_t startstop = ide_stopped;
-
- drive = hwif->cur_dev;
-
- if (expiry) {
- wait = expiry(drive);
- if (wait > 0) { /* continue */
- /* reset timer */
- hwif->timer.expires = jiffies + wait;
- hwif->req_gen_timer = hwif->req_gen;
- add_timer(&hwif->timer);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return;
- }
- }
- hwif->handler = NULL;
- hwif->expiry = NULL;
- /*
- * We need to simulate a real interrupt when invoking
- * the handler() function, which means we need to
- * globally mask the specific IRQ:
- */
- spin_unlock(&hwif->lock);
- /* disable_irq_nosync ?? */
- disable_irq(hwif->irq);
-
- if (hwif->polling) {
- startstop = handler(drive);
- } else if (drive_is_ready(drive)) {
- if (drive->waiting_for_dma)
- hwif->dma_ops->dma_lost_irq(drive);
- if (hwif->port_ops && hwif->port_ops->clear_irq)
- hwif->port_ops->clear_irq(drive);
-
- printk(KERN_WARNING "%s: lost interrupt\n",
- drive->name);
- startstop = handler(drive);
- } else {
- if (drive->waiting_for_dma)
- startstop = ide_dma_timeout_retry(drive, wait);
- else
- startstop = ide_error(drive, "irq timeout",
- hwif->tp_ops->read_status(hwif));
- }
- /* Disable interrupts again, `handler' might have enabled it */
- spin_lock_irq(&hwif->lock);
- enable_irq(hwif->irq);
- if (startstop == ide_stopped && hwif->polling == 0) {
- rq_in_flight = hwif->rq;
- hwif->rq = NULL;
- ide_unlock_port(hwif);
- plug_device = 1;
- }
- }
- spin_unlock_irqrestore(&hwif->lock, flags);
-
- if (plug_device) {
- ide_unlock_host(hwif->host);
- ide_requeue_and_plug(drive, rq_in_flight);
- }
-}
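Two details in the expiry path above are worth calling out: the callback recovers its containing structure with from_timer(), and the req_gen/req_gen_timer generation counters let it tell a stale timeout (the command completed just as the timer fired) from a real one. A minimal sketch of that same pattern, using hypothetical my_port/my_cmd_timeout names rather than the driver's own structures:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct my_port {
	spinlock_t lock;
	struct timer_list timer;
	unsigned int req_gen;		/* bumped whenever a command completes */
	unsigned int req_gen_timer;	/* snapshot taken when the timer was armed */
};

static void my_cmd_timeout(struct timer_list *t)
{
	struct my_port *port = from_timer(port, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (port->req_gen != port->req_gen_timer) {
		/* marginal timeout: the command already completed, nothing to do */
		spin_unlock_irqrestore(&port->lock, flags);
		return;
	}
	/* ... genuine timeout: run error recovery here ... */
	spin_unlock_irqrestore(&port->lock, flags);
}

static void my_port_init(struct my_port *port)
{
	spin_lock_init(&port->lock);
	timer_setup(&port->timer, my_cmd_timeout, 0);
}

static void my_arm_timeout(struct my_port *port, unsigned long timeout)
{
	port->req_gen_timer = port->req_gen;
	mod_timer(&port->timer, jiffies + timeout);
}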
-
-/**
- * unexpected_intr - handle an unexpected IDE interrupt
- * @irq: interrupt line
- * @hwif: port being processed
- *
- * There's nothing really useful we can do with an unexpected interrupt,
- * other than reading the status register (to clear it), and logging it.
- * There should be no way that an irq can happen before we're ready for it,
- * so we needn't worry much about losing an "important" interrupt here.
- *
- * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
- * the drive enters "idle", "standby", or "sleep" mode, so if the status
- * looks "good", we just ignore the interrupt completely.
- *
- * This routine assumes __cli() is in effect when called.
- *
- * If an unexpected interrupt happens on irq15 while we are handling irq14
- * and if the two interfaces are "serialized" (CMD640), then it looks like
- * we could screw up by interfering with a new request being set up for
- * irq15.
- *
- * In reality, this is a non-issue. The new command is not sent unless
- * the drive is ready to accept one, in which case we know the drive is
- * not trying to interrupt us. And ide_set_handler() is always invoked
- * before completing the issuance of any new drive command, so we will not
- * be accidentally invoked as a result of any valid command completion
- * interrupt.
- */
-
-static void unexpected_intr(int irq, ide_hwif_t *hwif)
-{
- u8 stat = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
- /* Try to not flood the console with msgs */
- static unsigned long last_msgtime, count;
- ++count;
-
- if (time_after(jiffies, last_msgtime + HZ)) {
- last_msgtime = jiffies;
- printk(KERN_ERR "%s: unexpected interrupt, "
- "status=0x%02x, count=%ld\n",
- hwif->name, stat, count);
- }
- }
-}
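The once-per-second throttle above is open-coded because it predates the generic rate-limit helpers; in later kernels the same effect is usually obtained with printk_ratelimited(). A small sketch of the equivalent idiom (not how this driver was actually written):

#include <linux/printk.h>
#include <linux/types.h>

static void report_unexpected_irq(const char *name, u8 stat)
{
	/* suppresses repeats using the global printk ratelimit state */
	printk_ratelimited(KERN_ERR "%s: unexpected interrupt, status=0x%02x\n",
			   name, stat);
}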
-
-/**
- * ide_intr - default IDE interrupt handler
- * @irq: interrupt number
- * @dev_id: hwif
- *
- * This is the default IRQ handler for the IDE layer. You should
- * not need to override it. If you do, be aware it is subtle in
- * places.
- *
- * hwif is the interface in the group currently performing
- * a command. hwif->cur_dev is the drive and hwif->handler is
- * the IRQ handler to call. As we issue a command the handlers
- * step through multiple states, reassigning the handler to the
- * next step in the process. Unlike a smart SCSI controller, IDE
- * expects the main processor to sequence the various transfer
- * stages. We also manage a poll timer to catch up with most
- * timeout situations. There are still a few where the handlers
- * don't ever decide to give up.
- *
- * The handler eventually returns ide_stopped to indicate the
- * request completed. At this point we issue the next request
- * on the port and the process begins again.
- */
-
-irqreturn_t ide_intr (int irq, void *dev_id)
-{
- ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
- struct ide_host *host = hwif->host;
- ide_drive_t *drive;
- ide_handler_t *handler;
- unsigned long flags;
- ide_startstop_t startstop;
- irqreturn_t irq_ret = IRQ_NONE;
- int plug_device = 0;
- struct request *rq_in_flight;
-
- if (host->host_flags & IDE_HFLAG_SERIALIZE) {
- if (hwif != host->cur_port)
- goto out_early;
- }
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- if (hwif->port_ops && hwif->port_ops->test_irq &&
- hwif->port_ops->test_irq(hwif) == 0)
- goto out;
-
- handler = hwif->handler;
-
- if (handler == NULL || hwif->polling) {
- /*
- * Not expecting an interrupt from this drive.
- * That means this could be:
- * (1) an interrupt from another PCI device
- * sharing the same PCI INT# as us.
- * or (2) a drive just entered sleep or standby mode,
- * and is interrupting to let us know.
- * or (3) a spurious interrupt of unknown origin.
- *
- * For PCI, we cannot tell the difference,
- * so in that case we just ignore it and hope it goes away.
- */
- if ((host->irq_flags & IRQF_SHARED) == 0) {
- /*
- * Probably not a shared PCI interrupt,
- * so we can safely try to do something about it:
- */
- unexpected_intr(irq, hwif);
- } else {
- /*
- * Whack the status register, just in case
- * we have a leftover pending IRQ.
- */
- (void)hwif->tp_ops->read_status(hwif);
- }
- goto out;
- }
-
- drive = hwif->cur_dev;
-
- if (!drive_is_ready(drive))
- /*
- * This happens regularly when we share a PCI IRQ with
- * another device. Unfortunately, it can also happen
- * with some buggy drives that trigger the IRQ before
- * their status register is up to date. Hopefully we have
- * enough advance overhead that the latter isn't a problem.
- */
- goto out;
-
- hwif->handler = NULL;
- hwif->expiry = NULL;
- hwif->req_gen++;
- del_timer(&hwif->timer);
- spin_unlock(&hwif->lock);
-
- if (hwif->port_ops && hwif->port_ops->clear_irq)
- hwif->port_ops->clear_irq(drive);
-
- if (drive->dev_flags & IDE_DFLAG_UNMASK)
- local_irq_enable_in_hardirq();
-
- /* service this interrupt, may set handler for next interrupt */
- startstop = handler(drive);
-
- spin_lock_irq(&hwif->lock);
- /*
- * Note that handler() may have set things up for another
- * interrupt to occur soon, but it cannot happen until
- * we exit from this routine, because it will be the
- * same irq as is currently being serviced here, and Linux
- * won't allow another of the same (on any CPU) until we return.
- */
- if (startstop == ide_stopped && hwif->polling == 0) {
- BUG_ON(hwif->handler);
- rq_in_flight = hwif->rq;
- hwif->rq = NULL;
- ide_unlock_port(hwif);
- plug_device = 1;
- }
- irq_ret = IRQ_HANDLED;
-out:
- spin_unlock_irqrestore(&hwif->lock, flags);
-out_early:
- if (plug_device) {
- ide_unlock_host(hwif->host);
- ide_requeue_and_plug(drive, rq_in_flight);
- }
-
- return irq_ret;
-}
-EXPORT_SYMBOL_GPL(ide_intr);
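ide_intr() follows the standard contract for handlers registered with IRQF_SHARED: decide early whether the interrupt was raised by this device and return IRQ_NONE if not, so that other handlers sharing the line get to run. A stripped-down sketch of that contract (the register layout and the status bit are invented for illustration):

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_dev {
	void __iomem *status_reg;	/* hypothetical interrupt status register */
};

static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* Not our device interrupting: let other sharers of this IRQ run. */
	if (!(readl(dev->status_reg) & 0x1))
		return IRQ_NONE;

	/* ... acknowledge the source and service the event here ... */
	return IRQ_HANDLED;
}

/* registration: request_irq(irq, my_intr, IRQF_SHARED, "mydev", dev); */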
-
-void ide_pad_transfer(ide_drive_t *drive, int write, int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 buf[4] = { 0 };
-
- while (len > 0) {
- if (write)
- hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
- else
- hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
- len -= 4;
- }
-}
-EXPORT_SYMBOL_GPL(ide_pad_transfer);
-
-void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
-{
- drive->sense_rq_active = true;
- list_add_tail(&rq->queuelist, &drive->rq_list);
- kblockd_schedule_work(&drive->rq_work);
-}
-EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
deleted file mode 100644
index 43fbc37d85c3..000000000000
--- a/drivers/ide/ide-ioctls.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * IDE ioctls handling.
- */
-
-#include <linux/compat.h>
-#include <linux/export.h>
-#include <linux/hdreg.h>
-#include <linux/ide.h>
-#include <linux/slab.h>
-
-static int put_user_long(long val, unsigned long arg)
-{
- if (in_compat_syscall())
- return put_user(val, (compat_long_t __user *)compat_ptr(arg));
-
- return put_user(val, (long __user *)arg);
-}
-
-static const struct ide_ioctl_devset ide_ioctl_settings[] = {
-{ HDIO_GET_32BIT, HDIO_SET_32BIT, &ide_devset_io_32bit },
-{ HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, &ide_devset_keepsettings },
-{ HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, &ide_devset_unmaskirq },
-{ HDIO_GET_DMA, HDIO_SET_DMA, &ide_devset_using_dma },
-{ -1, HDIO_SET_PIO_MODE, &ide_devset_pio_mode },
-{ 0 }
-};
-
-int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
- unsigned int cmd, unsigned long arg,
- const struct ide_ioctl_devset *s)
-{
- const struct ide_devset *ds;
- int err = -EOPNOTSUPP;
-
- for (; (ds = s->setting); s++) {
- if (ds->get && s->get_ioctl == cmd)
- goto read_val;
- else if (ds->set && s->set_ioctl == cmd)
- goto set_val;
- }
-
- return err;
-
-read_val:
- mutex_lock(&ide_setting_mtx);
- err = ds->get(drive);
- mutex_unlock(&ide_setting_mtx);
- return err >= 0 ? put_user_long(err, arg) : err;
-
-set_val:
- if (bdev_is_partition(bdev))
- err = -EINVAL;
- else {
- if (!capable(CAP_SYS_ADMIN))
- err = -EACCES;
- else {
- mutex_lock(&ide_setting_mtx);
- err = ide_devset_execute(drive, ds, arg);
- mutex_unlock(&ide_setting_mtx);
- }
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(ide_setting_ioctl);
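The table-driven path above is what services the classic hdparm get/set ioctls: a matching get_ioctl entry reads the setting and returns it through put_user_long(). A minimal user-space sketch reading the io_32bit setting via HDIO_GET_32BIT (the device node name is just an example):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	long val = 0;
	int fd = open("/dev/hda", O_RDONLY);	/* legacy IDE node, adjust as needed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_GET_32BIT, &val) == 0)
		printf("io_32bit = %ld\n", val);
	close(fd);
	return 0;
}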
-
-static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
- void __user *argp)
-{
- u16 *id = NULL;
- int size = (cmd == HDIO_GET_IDENTITY) ? (ATA_ID_WORDS * 2) : 142;
- int rc = 0;
-
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
- rc = -ENOMSG;
- goto out;
- }
-
- /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
- id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
- if (id == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- memcpy(id, drive->id, size);
- ata_id_to_hd_driveid(id);
-
- if (copy_to_user(argp, id, size))
- rc = -EFAULT;
-
- kfree(id);
-out:
- return rc;
-}
-
-static int ide_get_nice_ioctl(ide_drive_t *drive, unsigned long arg)
-{
- return put_user_long((!!(drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)
- << IDE_NICE_DSC_OVERLAP) |
- (!!(drive->dev_flags & IDE_DFLAG_NICE1)
- << IDE_NICE_1), arg);
-}
-
-static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
-{
- if (arg != (arg & ((1 << IDE_NICE_DSC_OVERLAP) | (1 << IDE_NICE_1))))
- return -EPERM;
-
- if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) &&
- (drive->media != ide_tape))
- return -EPERM;
-
- if ((arg >> IDE_NICE_DSC_OVERLAP) & 1)
- drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
- else
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-
- if ((arg >> IDE_NICE_1) & 1)
- drive->dev_flags |= IDE_DFLAG_NICE1;
- else
- drive->dev_flags &= ~IDE_DFLAG_NICE1;
-
- return 0;
-}
-
-static int ide_cmd_ioctl(ide_drive_t *drive, void __user *argp)
-{
- u8 *buf = NULL;
- int bufsize = 0, err = 0;
- u8 args[4], xfer_rate = 0;
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- if (NULL == argp) {
- struct request *rq;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
- blk_execute_rq(NULL, rq, 0);
- err = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
-
- return err;
- }
-
- if (copy_from_user(args, argp, 4))
- return -EFAULT;
-
- memset(&cmd, 0, sizeof(cmd));
- tf->feature = args[2];
- if (args[0] == ATA_CMD_SMART) {
- tf->nsect = args[3];
- tf->lbal = args[1];
- tf->lbam = ATA_SMART_LBAM_PASS;
- tf->lbah = ATA_SMART_LBAH_PASS;
- cmd.valid.out.tf = IDE_VALID_OUT_TF;
- cmd.valid.in.tf = IDE_VALID_NSECT;
- } else {
- tf->nsect = args[1];
- cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
- cmd.valid.in.tf = IDE_VALID_NSECT;
- }
- tf->command = args[0];
- cmd.protocol = args[3] ? ATA_PROT_PIO : ATA_PROT_NODATA;
-
- if (args[3]) {
- cmd.tf_flags |= IDE_TFLAG_IO_16BIT;
- bufsize = SECTOR_SIZE * args[3];
- buf = kzalloc(bufsize, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
- }
-
- if (tf->command == ATA_CMD_SET_FEATURES &&
- tf->feature == SETFEATURES_XFER &&
- tf->nsect >= XFER_SW_DMA_0) {
- xfer_rate = ide_find_dma_mode(drive, tf->nsect);
- if (xfer_rate != tf->nsect) {
- err = -EINVAL;
- goto abort;
- }
-
- cmd.tf_flags |= IDE_TFLAG_SET_XFER;
- }
-
- err = ide_raw_taskfile(drive, &cmd, buf, args[3]);
-
- args[0] = tf->status;
- args[1] = tf->error;
- args[2] = tf->nsect;
-abort:
- if (copy_to_user(argp, &args, 4))
- err = -EFAULT;
- if (buf) {
- if (copy_to_user((argp + 4), buf, bufsize))
- err = -EFAULT;
- kfree(buf);
- }
- return err;
-}
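The argument layout ide_cmd_ioctl() expects is the traditional HDIO_DRIVE_CMD one: args[0]=command, args[1]=sector/nsect, args[2]=feature, args[3]=sector count, with args[3]*512 bytes of data returned after the 4-byte header and status/error/nsect written back into args[0..2]. A hedged user-space sketch issuing IDENTIFY DEVICE (0xEC) this way, assuming a legacy /dev/hdX node and root privileges:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(int argc, char **argv)
{
	unsigned char args[4 + 512];	/* 4-byte header + one sector of data */
	int fd = open(argc > 1 ? argv[1] : "/dev/hda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(args, 0, sizeof(args));
	args[0] = 0xec;		/* ATA IDENTIFY DEVICE */
	args[3] = 1;		/* expect one 512-byte sector back */

	if (ioctl(fd, HDIO_DRIVE_CMD, args)) {
		perror("HDIO_DRIVE_CMD");
		close(fd);
		return 1;
	}

	/* On return args[0]=status, args[1]=error, args[2]=nsect; data starts at args[4]. */
	printf("status=0x%02x error=0x%02x nsect=%d\n", args[0], args[1], args[2]);
	close(fd);
	return 0;
}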
-
-static int ide_task_ioctl(ide_drive_t *drive, void __user *p)
-{
- int err = 0;
- u8 args[7];
- struct ide_cmd cmd;
-
- if (copy_from_user(args, p, 7))
- return -EFAULT;
-
- memset(&cmd, 0, sizeof(cmd));
- memcpy(&cmd.tf.feature, &args[1], 6);
- cmd.tf.command = args[0];
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
-
- err = ide_no_data_taskfile(drive, &cmd);
-
- args[0] = cmd.tf.command;
- memcpy(&args[1], &cmd.tf.feature, 6);
-
- if (copy_to_user(p, args, 7))
- err = -EFAULT;
-
- return err;
-}
-
-static int generic_drive_reset(ide_drive_t *drive)
-{
- struct request *rq;
- int ret = 0;
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- scsi_req(rq)->cmd_len = 1;
- scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
- blk_execute_rq(NULL, rq, 1);
- ret = scsi_req(rq)->result;
- blk_put_request(rq);
- return ret;
-}
-
-int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
- unsigned int cmd, unsigned long arg)
-{
- int err;
- void __user *argp = (void __user *)arg;
-
- if (in_compat_syscall())
- argp = compat_ptr(arg);
-
- err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
- if (err != -EOPNOTSUPP)
- return err;
-
- switch (cmd) {
- case HDIO_OBSOLETE_IDENTITY:
- case HDIO_GET_IDENTITY:
- if (bdev_is_partition(bdev))
- return -EINVAL;
- return ide_get_identity_ioctl(drive, cmd, argp);
- case HDIO_GET_NICE:
- return ide_get_nice_ioctl(drive, arg);
- case HDIO_SET_NICE:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- return ide_set_nice_ioctl(drive, arg);
-#ifdef CONFIG_IDE_TASK_IOCTL
- case HDIO_DRIVE_TASKFILE:
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- return -EACCES;
- /* missing compat handler for HDIO_DRIVE_TASKFILE */
- if (in_compat_syscall())
- return -ENOTTY;
- if (drive->media == ide_disk)
- return ide_taskfile_ioctl(drive, arg);
- return -ENOMSG;
-#endif
- case HDIO_DRIVE_CMD:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return ide_cmd_ioctl(drive, argp);
- case HDIO_DRIVE_TASK:
- if (!capable(CAP_SYS_RAWIO))
- return -EACCES;
- return ide_task_ioctl(drive, argp);
- case HDIO_DRIVE_RESET:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- return generic_drive_reset(drive);
- case HDIO_GET_BUSSTATE:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (put_user_long(BUSSTATE_ON, arg))
- return -EFAULT;
- return 0;
- case HDIO_SET_BUSSTATE:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- return -EOPNOTSUPP;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(generic_ide_ioctl);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
deleted file mode 100644
index f2be127ee96e..000000000000
--- a/drivers/ide/ide-iops.c
+++ /dev/null
@@ -1,536 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/blkpg.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/bitops.h>
-#include <linux/nmi.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-void SELECT_MASK(ide_drive_t *drive, int mask)
-{
- const struct ide_port_ops *port_ops = drive->hwif->port_ops;
-
- if (port_ops && port_ops->maskproc)
- port_ops->maskproc(drive, mask);
-}
-
-u8 ide_read_error(ide_drive_t *drive)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);
-
- return tf.error;
-}
-EXPORT_SYMBOL_GPL(ide_read_error);
-
-void ide_fix_driveid(u16 *id)
-{
-#ifndef __LITTLE_ENDIAN
-# ifdef __BIG_ENDIAN
- int i;
-
- for (i = 0; i < 256; i++)
- id[i] = __le16_to_cpu(id[i]);
-# else
-# error "Please fix <asm/byteorder.h>"
-# endif
-#endif
-}
-
-/*
- * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
- * removing leading/trailing blanks and compressing internal blanks.
- * It is primarily used to tidy up the model name/number fields as
- * returned by the ATA_CMD_ID_ATA[PI] commands.
- */
-
-void ide_fixstring(u8 *s, const int bytecount, const int byteswap)
-{
- u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
-
- if (byteswap) {
- /* convert from big-endian to host byte order */
- for (p = s ; p != end ; p += 2)
- be16_to_cpus((u16 *) p);
- }
-
- /* strip leading blanks */
- p = s;
- while (s != end && *s == ' ')
- ++s;
- /* compress internal blanks and strip trailing blanks */
- while (s != end && *s) {
- if (*s++ != ' ' || (s != end && *s && *s != ' '))
- *p++ = *(s-1);
- }
- /* wipe out trailing garbage */
- while (p != end)
- *p++ = '\0';
-}
-EXPORT_SYMBOL(ide_fixstring);
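To make the blank handling concrete, here is a user-space re-creation of just the stripping/compression part of the routine (byte-swapping omitted), run on an invented model string:

#include <stdio.h>
#include <string.h>

static void fixstring(char *s, int bytecount)
{
	char *p = s, *end = &s[bytecount & ~1];	/* bytecount must be even */

	/* strip leading blanks */
	while (s != end && *s == ' ')
		++s;
	/* compress internal blanks and strip trailing blanks */
	while (s != end && *s) {
		if (*s++ != ' ' || (s != end && *s && *s != ' '))
			*p++ = *(s - 1);
	}
	/* wipe out trailing garbage */
	while (p != end)
		*p++ = '\0';
}

int main(void)
{
	char model[40 + 1] = "  WDC  WD800JB-00JJC0        ";

	fixstring(model, 40);
	printf("'%s'\n", model);	/* prints 'WDC WD800JB-00JJC0' */
	return 0;
}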
-
-/*
- * This routine busy-waits for the drive status to be not "busy".
- * It then checks the status for all of the "good" bits and none
- * of the "bad" bits, and if all is okay it returns 0. All other
- * cases return error -- caller may then invoke ide_error().
- *
- * This routine should get fixed to not hog the CPU during extra long waits.
- * That could be done by busy-waiting for the first jiffy or two, and then
- * setting a timer to wake up at half-second intervals thereafter,
- * until the timeout expires.
- */
-int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
- unsigned long timeout, u8 *rstat)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- unsigned long flags;
- bool irqs_threaded = force_irqthreads;
- int i;
- u8 stat;
-
- udelay(1); /* spec allows drive 400ns to assert "BUSY" */
- stat = tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY) {
- if (!irqs_threaded) {
- local_save_flags(flags);
- local_irq_enable_in_hardirq();
- }
- timeout += jiffies;
- while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
- if (time_after(jiffies, timeout)) {
- /*
- * One last read after the timeout in case
- * heavy interrupt load made us not make any
- * progress during the timeout..
- */
- stat = tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0)
- break;
-
- if (!irqs_threaded)
- local_irq_restore(flags);
- *rstat = stat;
- return -EBUSY;
- }
- }
- if (!irqs_threaded)
- local_irq_restore(flags);
- }
- /*
- * Allow status to settle, then read it again.
- * A few rare drives vastly violate the 400ns spec here,
- * so we'll wait up to 10usec for a "good" status
- * rather than expensively fail things immediately.
- * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
- */
- for (i = 0; i < 10; i++) {
- udelay(1);
- stat = tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, good, bad)) {
- *rstat = stat;
- return 0;
- }
- }
- *rstat = stat;
- return -EFAULT;
-}
-
-/*
- * In case of error returns error value after doing "*startstop = ide_error()".
- * The caller should return the updated value of "startstop" in this case,
- * "startstop" is unchanged when the function returns 0.
- */
-int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
- u8 bad, unsigned long timeout)
-{
- int err;
- u8 stat;
-
- /* bail early if we've exceeded max_failures */
- if (drive->max_failures && (drive->failures > drive->max_failures)) {
- *startstop = ide_stopped;
- return 1;
- }
-
- err = __ide_wait_stat(drive, good, bad, timeout, &stat);
-
- if (err) {
- char *s = (err == -EBUSY) ? "status timeout" : "status error";
- *startstop = ide_error(drive, s, stat);
- }
-
- return err;
-}
-EXPORT_SYMBOL(ide_wait_stat);
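The busy-wait above keeps spinning because it can be reached from contexts that cannot sleep; the improvement suggested in its comment amounts to the usual jiffies/time_after() deadline idiom with a sleeping poll. A sketch of that variant, valid only in process context (read_status is a hypothetical callback standing in for the port's status accessor):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_ATA_BUSY 0x80	/* BSY bit of the ATA status register */

static int wait_not_busy(u8 (*read_status)(void *ctx), void *ctx,
			 unsigned long timeout_jiffies)
{
	unsigned long deadline = jiffies + timeout_jiffies;

	while (read_status(ctx) & MY_ATA_BUSY) {
		if (time_after(jiffies, deadline))
			return -EBUSY;
		usleep_range(100, 200);	/* sleep instead of hogging the CPU */
	}
	return 0;
}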
-
-/**
- * ide_in_drive_list - look for drive in black/white list
- * @id: drive identifier
- * @table: list to inspect
- *
- * Look for a drive in the blacklist and the whitelist tables.
- * Returns 1 if the drive is found in either table.
- */
-
-int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
-{
- for ( ; table->id_model; table++)
- if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
- (!table->id_firmware ||
- strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
- return 1;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_in_drive_list);
-
-/*
- * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
- * Some optical devices with buggy firmware have the same problem.
- */
-static const struct drive_list_entry ivb_list[] = {
- { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
- { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
- { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
- { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
- { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
- { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
- { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
- { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
- { "SAMSUNG SP0822N" , "WA100-10" },
- { NULL , NULL }
-};
-
-/*
- * All hosts that use the 80c ribbon must use this routine.
- * The name is derived from the upper byte of word 93 and the 80c ribbon.
- */
-u8 eighty_ninty_three(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
- int ivb = ide_in_drive_list(id, ivb_list);
-
- if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
- return 1;
-
- if (ivb)
- printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
- drive->name);
-
- if (ata_id_is_sata(id) && !ivb)
- return 1;
-
- if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
- goto no_80w;
-
- /*
- * FIXME:
- * - change master/slave IDENTIFY order
- * - force bit13 (80c cable present) check also for !ivb devices
- * (unless the slave device is pre-ATA3)
- */
- if (id[ATA_ID_HW_CONFIG] & 0x4000)
- return 1;
-
- if (ivb) {
- const char *model = (char *)&id[ATA_ID_PROD];
-
- if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
- /*
- * These ATAPI devices always report 80c cable
- * so we have to depend on the host in this case.
- */
- if (hwif->cbl == ATA_CBL_PATA80)
- return 1;
- } else {
- /* Depend on the device side cable detection. */
- if (id[ATA_ID_HW_CONFIG] & 0x2000)
- return 1;
- }
- }
-no_80w:
- if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
- return 0;
-
- printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
- "limiting max speed to UDMA33\n",
- drive->name,
- hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
-
- drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;
-
- return 0;
-}
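The checks above boil down to two bits of IDENTIFY word 93 (ATA_ID_HW_CONFIG): bit 14, which the function takes as the primary 80-wire indication on well-behaved drives, and bit 13, the device-side report that an 80-conductor cable is present, which is all the quirky "ivb" drives can be trusted for. A tiny user-space sketch of the same tests (the IDENTIFY data would come from elsewhere, and the word index is spelled out locally):

#include <stdbool.h>
#include <stdint.h>

#define ID_HW_CONFIG 93		/* word index in the 256-word IDENTIFY page */

static bool word93_bit14_80wire(const uint16_t *id)
{
	return id[ID_HW_CONFIG] & 0x4000;	/* primary 80c indication (bit 14) */
}

static bool device_detected_80wire(const uint16_t *id)
{
	return id[ID_HW_CONFIG] & 0x2000;	/* device-side cable detection (bit 13) */
}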
-
-static const char *nien_quirk_list[] = {
- "QUANTUM FIREBALLlct08 08",
- "QUANTUM FIREBALLP KA6.4",
- "QUANTUM FIREBALLP KA9.1",
- "QUANTUM FIREBALLP KX13.6",
- "QUANTUM FIREBALLP KX20.5",
- "QUANTUM FIREBALLP KX27.3",
- "QUANTUM FIREBALLP LM20.4",
- "QUANTUM FIREBALLP LM20.5",
- "FUJITSU MHZ2160BH G2",
- NULL
-};
-
-void ide_check_nien_quirk_list(ide_drive_t *drive)
-{
- const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
-
- for (list = nien_quirk_list; *list != NULL; list++)
- if (strstr(m, *list) != NULL) {
- drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
- return;
- }
-}
-
-int ide_driveid_update(ide_drive_t *drive)
-{
- u16 *id;
- int rc;
-
- id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
- if (id == NULL)
- return 0;
-
- SELECT_MASK(drive, 1);
- rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id, 1);
- SELECT_MASK(drive, 0);
-
- if (rc)
- goto out_err;
-
- drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES];
- drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
- drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
- drive->id[ATA_ID_CFA_MODES] = id[ATA_ID_CFA_MODES];
- /* anything more ? */
-
- kfree(id);
-
- return 1;
-out_err:
- if (rc == 2)
- printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
- kfree(id);
- return 0;
-}
-
-int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- struct ide_taskfile tf;
- u16 *id = drive->id, i;
- int error = 0;
- u8 stat;
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
- if (hwif->dma_ops) /* check if host supports DMA */
- hwif->dma_ops->dma_host_set(drive, 0);
-#endif
-
- /* Skip setting PIO flow-control modes on pre-EIDE drives */
- if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
- goto skip;
-
- /*
- * Don't use ide_wait_cmd here - it will
- * attempt to set_geometry and recalibrate,
- * but for some reason these don't work at
- * this point (lost interrupt).
- */
-
- udelay(1);
- tp_ops->dev_select(drive);
- SELECT_MASK(drive, 1);
- udelay(1);
- tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);
-
- memset(&tf, 0, sizeof(tf));
- tf.feature = SETFEATURES_XFER;
- tf.nsect = speed;
-
- tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);
-
- tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
-
- if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
- error = __ide_wait_stat(drive, drive->ready_stat,
- ATA_BUSY | ATA_DRQ | ATA_ERR,
- WAIT_CMD, &stat);
-
- SELECT_MASK(drive, 0);
-
- if (error) {
- (void) ide_dump_status(drive, "set_drive_speed_status", stat);
- return error;
- }
-
- if (speed >= XFER_SW_DMA_0) {
- id[ATA_ID_UDMA_MODES] &= ~0xFF00;
- id[ATA_ID_MWDMA_MODES] &= ~0x0700;
- id[ATA_ID_SWDMA_MODES] &= ~0x0700;
- if (ata_id_is_cfa(id))
- id[ATA_ID_CFA_MODES] &= ~0x0E00;
- } else if (ata_id_is_cfa(id))
- id[ATA_ID_CFA_MODES] &= ~0x01C0;
-
- skip:
-#ifdef CONFIG_BLK_DEV_IDEDMA
- if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
- hwif->dma_ops->dma_host_set(drive, 1);
- else if (hwif->dma_ops) /* check if host supports DMA */
- ide_dma_off_quietly(drive);
-#endif
-
- if (speed >= XFER_UDMA_0) {
- i = 1 << (speed - XFER_UDMA_0);
- id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
- } else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
- i = speed - XFER_MW_DMA_2;
- id[ATA_ID_CFA_MODES] |= i << 9;
- } else if (speed >= XFER_MW_DMA_0) {
- i = 1 << (speed - XFER_MW_DMA_0);
- id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
- } else if (speed >= XFER_SW_DMA_0) {
- i = 1 << (speed - XFER_SW_DMA_0);
- id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
- } else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
- i = speed - XFER_PIO_4;
- id[ATA_ID_CFA_MODES] |= i << 6;
- }
-
- if (!drive->init_speed)
- drive->init_speed = speed;
- drive->current_speed = speed;
- return error;
-}
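The id[] fixups at the end mirror how the IDENTIFY transfer-mode words are encoded: the low byte is the "supported" bitmap and the high byte the "currently selected" bitmap, so (i << 8 | i) marks the new mode as both. A worked user-space example for the UDMA word (the starting value is a sample, not real hardware data):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t udma_word = 0x007f;	/* sample: UDMA modes 0-6 supported, none selected */
	unsigned int mode = 5;		/* e.g. UDMA5 was just negotiated */
	uint16_t i = 1u << mode;

	udma_word &= ~0xFF00;		/* clear the old "selected" byte */
	udma_word |= (i << 8) | i;	/* mark mode 5 selected (and supported) */
	printf("UDMA word = 0x%04x\n", udma_word);	/* prints 0x207f */
	return 0;
}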
-
-/*
- * This should get invoked any time we exit the driver to
- * wait for an interrupt response from a drive. handler() points
- * at the appropriate code to handle the next interrupt, and a
- * timer is started to prevent us from waiting forever in case
- * something goes wrong (see the ide_timer_expiry() handler later on).
- *
- * See also ide_execute_command
- */
-void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
- unsigned int timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- BUG_ON(hwif->handler);
- hwif->handler = handler;
- hwif->timer.expires = jiffies + timeout;
- hwif->req_gen_timer = hwif->req_gen;
- add_timer(&hwif->timer);
-}
-
-void ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
- unsigned int timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
- __ide_set_handler(drive, handler, timeout);
- spin_unlock_irqrestore(&hwif->lock, flags);
-}
-EXPORT_SYMBOL(ide_set_handler);
-
-/**
- * ide_execute_command - execute an IDE command
- * @drive: IDE drive to issue the command against
- * @cmd: command
- * @handler: handler for next phase
- * @timeout: timeout for command
- *
- * Helper function to issue an IDE command. This handles the
- * atomicity requirements, command timing and ensures that the
- * handler and IRQ setup do not race. All IDE command kick off
- * should go via this function or do equivalent locking.
- */
-
-void ide_execute_command(ide_drive_t *drive, struct ide_cmd *cmd,
- ide_handler_t *handler, unsigned timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
- if ((cmd->protocol != ATAPI_PROT_DMA &&
- cmd->protocol != ATAPI_PROT_PIO) ||
- (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT))
- __ide_set_handler(drive, handler, timeout);
- hwif->tp_ops->exec_command(hwif, cmd->tf.command);
- /*
- * Drive takes 400nS to respond, we must avoid the IRQ being
- * serviced before that.
- *
- * FIXME: we could skip this delay with care on non shared devices
- */
- ndelay(400);
- spin_unlock_irqrestore(&hwif->lock, flags);
-}
-
-/*
- * ide_wait_not_busy() waits for the currently selected device on the hwif
- * to report a non-busy status, see comments in ide_probe_port().
- */
-int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
-{
- u8 stat = 0;
-
- while (timeout--) {
- /*
- * Turn this into a schedule() sleep once I'm sure
- * about locking issues (2.5 work ?).
- */
- mdelay(1);
- stat = hwif->tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0)
- return 0;
- /*
- * Assume a value of 0xff means nothing is connected to
- * the interface and it doesn't implement the pull-down
- * resistor on D7.
- */
- if (stat == 0xff)
- return -ENODEV;
- touch_nmi_watchdog();
- }
- return -EBUSY;
-}
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
deleted file mode 100644
index be65b411ab53..000000000000
--- a/drivers/ide/ide-legacy.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ide.h>
-
-static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
- u8 port_no, const struct ide_port_info *d,
- unsigned long config)
-{
- unsigned long base, ctl;
- int irq;
-
- if (port_no == 0) {
- base = 0x1f0;
- ctl = 0x3f6;
- irq = 14;
- } else {
- base = 0x170;
- ctl = 0x376;
- irq = 15;
- }
-
- if (!request_region(base, 8, d->name)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- d->name, base, base + 7);
- return;
- }
-
- if (!request_region(ctl, 1, d->name)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- d->name, ctl);
- release_region(base, 8);
- return;
- }
-
- ide_std_init_ports(hw, base, ctl);
- hw->irq = irq;
- hw->config = config;
-
- hws[port_no] = hw;
-}
-
-int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
-{
- struct ide_hw hw[2], *hws[] = { NULL, NULL };
-
- memset(&hw, 0, sizeof(hw));
-
- if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
- ide_legacy_init_one(hws, &hw[0], 0, d, config);
- ide_legacy_init_one(hws, &hw[1], 1, d, config);
-
- if (hws[0] == NULL && hws[1] == NULL &&
- (d->host_flags & IDE_HFLAG_SINGLE))
- return -ENOENT;
-
- return ide_host_add(d, hws, 2, NULL);
-}
-EXPORT_SYMBOL_GPL(ide_legacy_device_add);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
deleted file mode 100644
index 7b9f655adbc2..000000000000
--- a/drivers/ide/ide-lib.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-#include <linux/bitops.h>
-
-u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
-{
- struct ide_taskfile *tf = &cmd->tf;
- u32 high, low;
-
- low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
- if (lba48) {
- tf = &cmd->hob;
- high = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
- } else
- high = tf->device & 0xf;
-
- return ((u64)high << 24) | low;
-}
-EXPORT_SYMBOL_GPL(ide_get_lba_addr);
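The packing above is easier to see with concrete register values. A self-contained user-space sketch of the same arithmetic (the numbers are invented for the example):

#include <stdio.h>
#include <stdint.h>

/* LBA28: low 24 bits come from LBAL/LBAM/LBAH, top 4 bits from the device register. */
static uint64_t lba28(uint8_t lbal, uint8_t lbam, uint8_t lbah, uint8_t device)
{
	uint32_t low = ((uint32_t)lbah << 16) | ((uint32_t)lbam << 8) | lbal;

	return ((uint64_t)(device & 0xf) << 24) | low;
}

/* LBA48: the HOB (previous) register values supply bits 24..47. */
static uint64_t lba48(uint8_t lbal, uint8_t lbam, uint8_t lbah,
		      uint8_t hob_lbal, uint8_t hob_lbam, uint8_t hob_lbah)
{
	uint32_t low  = ((uint32_t)lbah << 16) | ((uint32_t)lbam << 8) | lbal;
	uint32_t high = ((uint32_t)hob_lbah << 16) | ((uint32_t)hob_lbam << 8) | hob_lbal;

	return ((uint64_t)high << 24) | low;
}

int main(void)
{
	/* LBAH/LBAM/LBAL = 0x12/0x34/0x56, device low nibble = 0x3 -> sector 0x3123456 */
	printf("lba28 = 0x%llx\n", (unsigned long long)lba28(0x56, 0x34, 0x12, 0xe3));
	printf("lba48 = 0x%llx\n",
	       (unsigned long long)lba48(0x56, 0x34, 0x12, 0x78, 0x9a, 0xbc));
	return 0;
}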
-
-static void ide_dump_sector(ide_drive_t *drive)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
- u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
-
- memset(&cmd, 0, sizeof(cmd));
- if (lba48) {
- cmd.valid.in.tf = IDE_VALID_LBA;
- cmd.valid.in.hob = IDE_VALID_LBA;
- cmd.tf_flags = IDE_TFLAG_LBA48;
- } else
- cmd.valid.in.tf = IDE_VALID_LBA | IDE_VALID_DEVICE;
-
- ide_tf_readback(drive, &cmd);
-
- if (lba48 || (tf->device & ATA_LBA))
- printk(KERN_CONT ", LBAsect=%llu",
- (unsigned long long)ide_get_lba_addr(&cmd, lba48));
- else
- printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
- tf->device & 0xf, tf->lbal);
-}
-
-static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
-{
- printk(KERN_CONT "{ ");
- if (err & ATA_ABORTED)
- printk(KERN_CONT "DriveStatusError ");
- if (err & ATA_ICRC)
- printk(KERN_CONT "%s",
- (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
- if (err & ATA_UNC)
- printk(KERN_CONT "UncorrectableError ");
- if (err & ATA_IDNF)
- printk(KERN_CONT "SectorIdNotFound ");
- if (err & ATA_TRK0NF)
- printk(KERN_CONT "TrackZeroNotFound ");
- if (err & ATA_AMNF)
- printk(KERN_CONT "AddrMarkNotFound ");
- printk(KERN_CONT "}");
- if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
- (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
- struct request *rq = drive->hwif->rq;
-
- ide_dump_sector(drive);
-
- if (rq)
- printk(KERN_CONT ", sector=%llu",
- (unsigned long long)blk_rq_pos(rq));
- }
- printk(KERN_CONT "\n");
-}
-
-static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
-{
- printk(KERN_CONT "{ ");
- if (err & ATAPI_ILI)
- printk(KERN_CONT "IllegalLengthIndication ");
- if (err & ATAPI_EOM)
- printk(KERN_CONT "EndOfMedia ");
- if (err & ATA_ABORTED)
- printk(KERN_CONT "AbortedCommand ");
- if (err & ATA_MCR)
- printk(KERN_CONT "MediaChangeRequested ");
- if (err & ATAPI_LFS)
- printk(KERN_CONT "LastFailedSense=0x%02x ",
- (err & ATAPI_LFS) >> 4);
- printk(KERN_CONT "}\n");
-}
-
-/**
- * ide_dump_status - translate ATA/ATAPI error
- * @drive: drive that status applies to
- * @msg: text message to print
- * @stat: status byte to decode
- *
- * Error reporting, in human readable form (luxurious, but a memory hog).
- * Combines the drive name, message and status byte to provide a
- * user understandable explanation of the device error.
- */
-
-u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
-{
- u8 err = 0;
-
- printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
- if (stat & ATA_BUSY)
- printk(KERN_CONT "Busy ");
- else {
- if (stat & ATA_DRDY)
- printk(KERN_CONT "DriveReady ");
- if (stat & ATA_DF)
- printk(KERN_CONT "DeviceFault ");
- if (stat & ATA_DSC)
- printk(KERN_CONT "SeekComplete ");
- if (stat & ATA_DRQ)
- printk(KERN_CONT "DataRequest ");
- if (stat & ATA_CORR)
- printk(KERN_CONT "CorrectedError ");
- if (stat & ATA_SENSE)
- printk(KERN_CONT "Sense ");
- if (stat & ATA_ERR)
- printk(KERN_CONT "Error ");
- }
- printk(KERN_CONT "}\n");
- if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
- err = ide_read_error(drive);
- printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
- if (drive->media == ide_disk)
- ide_dump_ata_error(drive, err);
- else
- ide_dump_atapi_error(drive, err);
- }
-
- printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n",
- drive->name, drive->hwif->cmd.tf.command);
-
- return err;
-}
-EXPORT_SYMBOL(ide_dump_status);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
deleted file mode 100644
index a80a0f28f7b9..000000000000
--- a/drivers/ide/ide-park.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-#include <linux/jiffies.h>
-#include <linux/blkdev.h>
-
-DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
-
-static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request_queue *q = drive->queue;
- struct request *rq;
- int rc;
-
- timeout += jiffies;
- spin_lock_irq(&hwif->lock);
- if (drive->dev_flags & IDE_DFLAG_PARKED) {
- int reset_timer = time_before(timeout, drive->sleep);
- int start_queue = 0;
-
- drive->sleep = timeout;
- wake_up_all(&ide_park_wq);
- if (reset_timer && del_timer(&hwif->timer))
- start_queue = 1;
- spin_unlock_irq(&hwif->lock);
-
- if (start_queue)
- blk_mq_run_hw_queues(q, true);
- return;
- }
- spin_unlock_irq(&hwif->lock);
-
- rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
- scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
- scsi_req(rq)->cmd_len = 1;
- ide_req(rq)->type = ATA_PRIV_MISC;
- ide_req(rq)->special = &timeout;
- blk_execute_rq(NULL, rq, 1);
- rc = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
- if (rc)
- goto out;
-
- /*
- * Make sure that *some* command is sent to the drive after the
- * timeout has expired, so power management will be reenabled.
- */
- rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(rq))
- goto out;
-
- scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
- scsi_req(rq)->cmd_len = 1;
- ide_req(rq)->type = ATA_PRIV_MISC;
- spin_lock_irq(&hwif->lock);
- ide_insert_request_head(drive, rq);
- spin_unlock_irq(&hwif->lock);
-
-out:
- return;
-}
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
-{
- struct ide_cmd cmd;
- struct ide_taskfile *tf = &cmd.tf;
-
- memset(&cmd, 0, sizeof(cmd));
- if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
- drive->sleep = *(unsigned long *)ide_req(rq)->special;
- drive->dev_flags |= IDE_DFLAG_SLEEPING;
- tf->command = ATA_CMD_IDLEIMMEDIATE;
- tf->feature = 0x44;
- tf->lbal = 0x4c;
- tf->lbam = 0x4e;
- tf->lbah = 0x55;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- } else /* cmd == REQ_UNPARK_HEADS */
- tf->command = ATA_CMD_CHK_POWER;
-
- cmd.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
- cmd.protocol = ATA_PROT_NODATA;
-
- cmd.rq = rq;
-
- return do_rw_taskfile(drive, &cmd);
-}
-
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- ide_hwif_t *hwif = drive->hwif;
- unsigned long now;
- unsigned int msecs;
-
- if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
- return -EOPNOTSUPP;
-
- spin_lock_irq(&hwif->lock);
- now = jiffies;
- if (drive->dev_flags & IDE_DFLAG_PARKED &&
- time_after(drive->sleep, now))
- msecs = jiffies_to_msecs(drive->sleep - now);
- else
- msecs = 0;
- spin_unlock_irq(&hwif->lock);
-
- return snprintf(buf, 20, "%u\n", msecs);
-}
-
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
-#define MAX_PARK_TIMEOUT 30000
- ide_drive_t *drive = to_ide_device(dev);
- long int input;
- int rc;
-
- rc = kstrtol(buf, 10, &input);
- if (rc)
- return rc;
- if (input < -2)
- return -EINVAL;
- if (input > MAX_PARK_TIMEOUT) {
- input = MAX_PARK_TIMEOUT;
- rc = -EOVERFLOW;
- }
-
- mutex_lock(&ide_setting_mtx);
- if (input >= 0) {
- if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
- rc = -EOPNOTSUPP;
- else if (input || drive->dev_flags & IDE_DFLAG_PARKED)
- issue_park_cmd(drive, msecs_to_jiffies(input));
- } else {
- if (drive->media == ide_disk)
- switch (input) {
- case -1:
- drive->dev_flags &= ~IDE_DFLAG_NO_UNLOAD;
- break;
- case -2:
- drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
- break;
- }
- else
- rc = -EOPNOTSUPP;
- }
- mutex_unlock(&ide_setting_mtx);
-
- return rc ? rc : len;
-}
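ide_park_show() and ide_park_store() back the per-device unload_heads sysfs attribute: writing a value in milliseconds (capped at 30000 above) parks the heads for that long, and -1/-2 clear or set the no-unload flag. A hedged user-space sketch; the path follows the disk-shock-protection documentation and should be verified on the target system:

#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/block/hda/device/unload_heads";	/* example path */
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fprintf(f, "%d\n", 2000);	/* park the heads for two seconds */
	fclose(f);
	return 0;
}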
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
deleted file mode 100644
index 673420db953f..000000000000
--- a/drivers/ide/ide-pci-generic.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
- * Portions (C) Copyright 2002 Red Hat Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "ide_pci_generic"
-
-static bool ide_generic_all; /* Set to claim all devices */
-
-module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
-MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
-
-static void netcell_quirkproc(ide_drive_t *drive)
-{
- /* mark words 85-87 as valid */
- drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
-}
-
-static const struct ide_port_ops netcell_port_ops = {
- .quirkproc = netcell_quirkproc,
-};
-
-#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
- { \
- .name = DRV_NAME, \
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
- extra_flags, \
- .swdma_mask = ATA_SWDMA2, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = ATA_UDMA6, \
- }
-
-static const struct ide_port_info generic_chipsets[] = {
- /* 0: Unknown */
- DECLARE_GENERIC_PCI_DEV(0),
-
- { /* 1: NS87410 */
- .name = DRV_NAME,
- .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- },
-
- /* 2: SAMURAI / HT6565 / HINT_IDE */
- DECLARE_GENERIC_PCI_DEV(0),
- /* 3: UM8673F / UM8886A / UM8886BF */
- DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_DMA),
- /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */
- DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA),
-
- { /* 5: VIA8237SATA */
- .name = DRV_NAME,
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_OFF_BOARD,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- },
-
- { /* 6: Revolution */
- .name = DRV_NAME,
- .port_ops = &netcell_port_ops,
- .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
- IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_OFF_BOARD,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- }
-};
-
-/**
- * generic_init_one - called when a PIIX is found
- * @dev: the generic device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct ide_port_info *d = &generic_chipsets[id->driver_data];
- int ret = -ENODEV;
-
- /* Don't use the generic entry unless instructed to do so */
- if (id->driver_data == 0 && ide_generic_all == 0)
- goto out;
-
- switch (dev->vendor) {
- case PCI_VENDOR_ID_UMC:
- if (dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
- !(PCI_FUNC(dev->devfn) & 1))
- goto out; /* UM8886A/BF pair */
- break;
- case PCI_VENDOR_ID_OPTI:
- if (dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
- !(PCI_FUNC(dev->devfn) & 1))
- goto out;
- break;
- case PCI_VENDOR_ID_JMICRON:
- if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 &&
- PCI_FUNC(dev->devfn) != 1)
- goto out;
- break;
- case PCI_VENDOR_ID_NS:
- if (dev->device == PCI_DEVICE_ID_NS_87410 &&
- (dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
- goto out;
- break;
- }
-
- if (dev->vendor != PCI_VENDOR_ID_JMICRON) {
- u16 command;
- pci_read_config_word(dev, PCI_COMMAND, &command);
- if (!(command & PCI_COMMAND_IO)) {
- printk(KERN_INFO "%s %s: skipping disabled "
- "controller\n", d->name, pci_name(dev));
- goto out;
- }
- }
- ret = ide_pci_init_one(dev, d, NULL);
-out:
- return ret;
-}
-
-static const struct pci_device_id generic_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 },
- { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 },
- { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 },
- { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 },
- { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 },
- { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 },
- { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 },
- { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 },
-#endif
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 },
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 },
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), 4 },
- { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), 4 },
- { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 },
- /*
- * Must come last. If you add entries adjust
- * this table and generic_chipsets[] appropriately.
- */
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, generic_pci_tbl);
-
-static struct pci_driver generic_pci_driver = {
- .name = "PCI_IDE",
- .id_table = generic_pci_tbl,
- .probe = generic_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init generic_ide_init(void)
-{
- return ide_pci_register_driver(&generic_pci_driver);
-}
-
-static void __exit generic_ide_exit(void)
-{
- pci_unregister_driver(&generic_pci_driver);
-}
-
-module_init(generic_ide_init);
-module_exit(generic_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for generic PCI IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
deleted file mode 100644
index 1fd24798e5c9..000000000000
--- a/drivers/ide/ide-pio-blacklist.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PIO blacklist. Some drives incorrectly report their maximal PIO mode,
- * at least with respect to CMD640. Here we keep info on some known drives.
- *
- * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION
- * to avoid breaking the fragile cmd640.c support.
- */
-
-#include <linux/string.h>
-#include <linux/ide.h>
-
-static struct ide_pio_info {
- const char *name;
- int pio;
-} ide_pio_blacklist [] = {
- { "Conner Peripherals 540MB - CFS540A", 3 },
-
- { "WDC AC2700", 3 },
- { "WDC AC2540", 3 },
- { "WDC AC2420", 3 },
- { "WDC AC2340", 3 },
- { "WDC AC2250", 0 },
- { "WDC AC2200", 0 },
- { "WDC AC21200", 4 },
- { "WDC AC2120", 0 },
- { "WDC AC2850", 3 },
- { "WDC AC1270", 3 },
- { "WDC AC1170", 1 },
- { "WDC AC1210", 1 },
- { "WDC AC280", 0 },
- { "WDC AC31000", 3 },
- { "WDC AC31200", 3 },
-
- { "Maxtor 7131 AT", 1 },
- { "Maxtor 7171 AT", 1 },
- { "Maxtor 7213 AT", 1 },
- { "Maxtor 7245 AT", 1 },
- { "Maxtor 7345 AT", 1 },
- { "Maxtor 7546 AT", 3 },
- { "Maxtor 7540 AV", 3 },
-
- { "SAMSUNG SHD-3121A", 1 },
- { "SAMSUNG SHD-3122A", 1 },
- { "SAMSUNG SHD-3172A", 1 },
-
- { "ST5660A", 3 },
- { "ST3660A", 3 },
- { "ST3630A", 3 },
- { "ST3655A", 3 },
- { "ST3391A", 3 },
- { "ST3390A", 1 },
- { "ST3600A", 1 },
- { "ST3290A", 0 },
- { "ST3144A", 0 },
- { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive)
- according to Seagate's FIND-ATA program */
-
- { "QUANTUM ELS127A", 0 },
- { "QUANTUM ELS170A", 0 },
- { "QUANTUM LPS240A", 0 },
- { "QUANTUM LPS210A", 3 },
- { "QUANTUM LPS270A", 3 },
- { "QUANTUM LPS365A", 3 },
- { "QUANTUM LPS540A", 3 },
- { "QUANTUM LIGHTNING 540A", 3 },
- { "QUANTUM LIGHTNING 730A", 3 },
-
- { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */
- { "QUANTUM FIREBALL_640", 3 },
- { "QUANTUM FIREBALL_1080", 3 },
- { "QUANTUM FIREBALL_1280", 3 },
- { NULL, 0 }
-};
-
-/**
- * ide_scan_pio_blacklist - check for a blacklisted drive
- * @model: Drive model string
- *
- * This routine searches the ide_pio_blacklist for an entry
- * matching the start/whole of the supplied model name.
- *
- * Returns -1 if no match found.
- * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
- */
-
-int ide_scan_pio_blacklist(char *model)
-{
- struct ide_pio_info *p;
-
- for (p = ide_pio_blacklist; p->name != NULL; p++) {
- if (strncmp(p->name, model, strlen(p->name)) == 0)
- return p->pio;
- }
- return -1;
-}
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
deleted file mode 100644
index d680b3e3295f..000000000000
--- a/drivers/ide/ide-pm.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/ide.h>
-
-int generic_ide_suspend(struct device *dev, pm_message_t mesg)
-{
- ide_drive_t *drive = to_ide_device(dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- struct ide_pm_state rqpm;
- int ret;
-
- if (ide_port_acpi(hwif)) {
- /* call ACPI _GTM only once */
- if ((drive->dn & 1) == 0 || pair == NULL)
- ide_acpi_get_timing(hwif);
- }
-
- memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
- ide_req(rq)->special = &rqpm;
- rqpm.pm_step = IDE_PM_START_SUSPEND;
- if (mesg.event == PM_EVENT_PRETHAW)
- mesg.event = PM_EVENT_FREEZE;
- rqpm.pm_state = mesg.event;
-
- blk_execute_rq(NULL, rq, 0);
- ret = scsi_req(rq)->result ? -EIO : 0;
- blk_put_request(rq);
-
- if (ret == 0 && ide_port_acpi(hwif)) {
- /* call ACPI _PS3 only after both devices are suspended */
- if ((drive->dn & 1) || pair == NULL)
- ide_acpi_set_state(hwif, 0);
- }
-
- return ret;
-}
-
-static int ide_pm_execute_rq(struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- if (unlikely(blk_queue_dying(q))) {
- rq->rq_flags |= RQF_QUIET;
- scsi_req(rq)->result = -ENXIO;
- blk_mq_end_request(rq, BLK_STS_OK);
- return -ENXIO;
- }
- blk_execute_rq(NULL, rq, true);
-
- return scsi_req(rq)->result ? -EIO : 0;
-}
-
-int generic_ide_resume(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- struct ide_pm_state rqpm;
- int err;
-
- blk_mq_start_stopped_hw_queues(drive->queue, true);
-
- if (ide_port_acpi(hwif)) {
- /* call ACPI _PS0 / _STM only once */
- if ((drive->dn & 1) == 0 || pair == NULL) {
- ide_acpi_set_state(hwif, 1);
- ide_acpi_push_timing(hwif);
- }
-
- ide_acpi_exec_tfs(drive);
- }
-
- memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
- ide_req(rq)->type = ATA_PRIV_PM_RESUME;
- ide_req(rq)->special = &rqpm;
- rqpm.pm_step = IDE_PM_START_RESUME;
- rqpm.pm_state = PM_EVENT_ON;
-
- err = ide_pm_execute_rq(rq);
- blk_put_request(rq);
-
- if (err == 0 && dev->driver) {
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- if (drv->resume)
- drv->resume(drive);
- }
-
- return err;
-}
-
-void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
-{
- struct ide_pm_state *pm = ide_req(rq)->special;
-
-#ifdef DEBUG_PM
- printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
- drive->name, pm->pm_step);
-#endif
- if (drive->media != ide_disk)
- return;
-
- switch (pm->pm_step) {
- case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
- if (pm->pm_state == PM_EVENT_FREEZE)
- pm->pm_step = IDE_PM_COMPLETED;
- else
- pm->pm_step = IDE_PM_STANDBY;
- break;
- case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
- pm->pm_step = IDE_PM_COMPLETED;
- break;
- case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
- pm->pm_step = IDE_PM_IDLE;
- break;
- case IDE_PM_IDLE: /* Resume step 2 (idle)*/
- pm->pm_step = IDE_PM_RESTORE_DMA;
- break;
- }
-}
-
-ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
-{
- struct ide_pm_state *pm = ide_req(rq)->special;
- struct ide_cmd cmd = { };
-
- switch (pm->pm_step) {
- case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
- if (drive->media != ide_disk)
- break;
- /* Not supported? Switch to next step now. */
- if (ata_id_flush_enabled(drive->id) == 0 ||
- (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
- ide_complete_power_step(drive, rq);
- return ide_stopped;
- }
- if (ata_id_flush_ext_enabled(drive->id))
- cmd.tf.command = ATA_CMD_FLUSH_EXT;
- else
- cmd.tf.command = ATA_CMD_FLUSH;
- goto out_do_tf;
- case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
- cmd.tf.command = ATA_CMD_STANDBYNOW1;
- goto out_do_tf;
- case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
- ide_set_max_pio(drive);
- /*
- * skip IDE_PM_IDLE for ATAPI devices
- */
- if (drive->media != ide_disk)
- pm->pm_step = IDE_PM_RESTORE_DMA;
- else
- ide_complete_power_step(drive, rq);
- return ide_stopped;
- case IDE_PM_IDLE: /* Resume step 2 (idle) */
- cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
- goto out_do_tf;
- case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
- /*
- * Right now, all we do is call ide_set_dma(drive),
- * we could be smarter and check for current xfer_speed
- * in struct drive etc...
- */
- if (drive->hwif->dma_ops == NULL)
- break;
- /*
- * TODO: respect IDE_DFLAG_USING_DMA
- */
- ide_set_dma(drive);
- break;
- }
-
- pm->pm_step = IDE_PM_COMPLETED;
-
- return ide_stopped;
-
-out_do_tf:
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.protocol = ATA_PROT_NODATA;
-
- return do_rw_taskfile(drive, &cmd);
-}
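Read together, ide_complete_power_step() and ide_start_power_step() implement two short step sequences: suspend runs FLUSH_CACHE (jumping straight to COMPLETED for PM_EVENT_FREEZE) and then STANDBY, while resume runs RESTORE_PIO, IDLE and RESTORE_DMA (ATAPI devices skip IDLE). A toy user-space model of just the ordering for the disk case, to make the two switch statements easier to follow:

#include <stdio.h>

enum step { FLUSH_CACHE, STANDBY, RESTORE_PIO, IDLE, RESTORE_DMA, COMPLETED };

/* suspend ordering; a freeze event skips the STANDBY command */
static enum step next_suspend_step(enum step s, int freeze)
{
	switch (s) {
	case FLUSH_CACHE:	return freeze ? COMPLETED : STANDBY;
	default:		return COMPLETED;
	}
}

/* resume ordering */
static enum step next_resume_step(enum step s)
{
	switch (s) {
	case RESTORE_PIO:	return IDLE;
	case IDLE:		return RESTORE_DMA;
	default:		return COMPLETED;
	}
}

int main(void)
{
	enum step s;

	for (s = FLUSH_CACHE; s != COMPLETED; s = next_suspend_step(s, 0))
		printf("suspend step %d\n", s);
	for (s = RESTORE_PIO; s != COMPLETED; s = next_resume_step(s))
		printf("resume step %d\n", s);
	return 0;
}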
-
-/**
- * ide_complete_pm_rq - end the current Power Management request
- * @drive: target drive
- * @rq: request
- *
- * This function cleans up the current PM request and stops the queue
- * if necessary.
- */
-void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
-{
- struct request_queue *q = drive->queue;
- struct ide_pm_state *pm = ide_req(rq)->special;
-
- ide_complete_power_step(drive, rq);
- if (pm->pm_step != IDE_PM_COMPLETED)
- return;
-
-#ifdef DEBUG_PM
- printk("%s: completing PM request, %s\n", drive->name,
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
-#endif
- if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
- blk_mq_stop_hw_queues(q);
- else
- drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-
- drive->hwif->rq = NULL;
-
- blk_mq_end_request(rq, BLK_STS_OK);
-}
-
-void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
-{
- struct ide_pm_state *pm = ide_req(rq)->special;
-
- if (blk_rq_is_private(rq) &&
- ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
- pm->pm_step == IDE_PM_START_SUSPEND)
- /* Mark drive blocked when starting the suspend sequence. */
- drive->dev_flags |= IDE_DFLAG_BLOCKED;
- else if (blk_rq_is_private(rq) &&
- ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
- pm->pm_step == IDE_PM_START_RESUME) {
- /*
- * The first thing we do on wakeup is to wait for the BSY bit to
- * go away (with a looong timeout) as a drive on this hwif may
- * just be POSTing itself.
- * We do that before even selecting as the "other" device on
- * the bus may be broken enough to walk on our toes at this
- * point.
- */
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- struct request_queue *q = drive->queue;
- int rc;
-#ifdef DEBUG_PM
- printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
-#endif
- rc = ide_wait_not_busy(hwif, 35000);
- if (rc)
- printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
- tp_ops->dev_select(drive);
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- rc = ide_wait_not_busy(hwif, 100000);
- if (rc)
- printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
-
- blk_mq_start_hw_queues(q);
- }
-}
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
deleted file mode 100644
index fc541f1cf8de..000000000000
--- a/drivers/ide/ide-pnp.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * This file provides autodetection for ISA PnP IDE interfaces.
- * It was tested with "ESS ES1868 Plug and Play AudioDrive" IDE interface.
- *
- * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
- */
-
-#include <linux/init.h>
-#include <linux/pnp.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-#define DRV_NAME "ide-pnp"
-
-/* Add your devices here :)) */
-static const struct pnp_device_id idepnp_devices[] = {
- /* Generic ESDI/IDE/ATA compatible hard disk controller */
- {.id = "PNP0600", .driver_data = 0},
- {.id = ""}
-};
-
-static const struct ide_port_info ide_pnp_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
-{
- struct ide_host *host;
- unsigned long base, ctl;
- int rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
-
- if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
- return -1;
-
- base = pnp_port_start(dev, 0);
- ctl = pnp_port_start(dev, 1);
-
- if (!request_region(base, 8, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- DRV_NAME, base, base + 7);
- return -EBUSY;
- }
-
- if (!request_region(ctl, 1, DRV_NAME)) {
- printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
- DRV_NAME, ctl);
- release_region(base, 8);
- return -EBUSY;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, base, ctl);
- hw.irq = pnp_irq(dev, 0);
-
- rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
- if (rc)
- goto out;
-
- pnp_set_drvdata(dev, host);
-
- return 0;
-out:
- release_region(ctl, 1);
- release_region(base, 8);
-
- return rc;
-}
-
-static void idepnp_remove(struct pnp_dev *dev)
-{
- struct ide_host *host = pnp_get_drvdata(dev);
-
- ide_host_remove(host);
-
- release_region(pnp_port_start(dev, 1), 1);
- release_region(pnp_port_start(dev, 0), 8);
-}
-
-static struct pnp_driver idepnp_driver = {
- .name = "ide",
- .id_table = idepnp_devices,
- .probe = idepnp_probe,
- .remove = idepnp_remove,
-};
-
-module_pnp_driver(idepnp_driver);
-MODULE_LICENSE("GPL");
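
idepnp_probe() above follows the usual claim-then-roll-back discipline: grab the eight command-block ports, then the single control port, and release the first claim if the second one fails (idepnp_remove() releases them in reverse). A self-contained sketch of that ordering, with hypothetical claim_ports()/release_ports() standing in for request_region()/release_region():

    #include <stdbool.h>

    /* Hypothetical stand-ins for request_region()/release_region(). */
    extern bool claim_ports(unsigned long start, unsigned long len);
    extern void release_ports(unsigned long start, unsigned long len);

    /* Claim base (8 ports) and ctl (1 port); undo the first claim on failure. */
    static int claim_ide_io(unsigned long base, unsigned long ctl)
    {
        if (!claim_ports(base, 8))
            return -1;
        if (!claim_ports(ctl, 1)) {
            release_ports(base, 8);    /* roll back the earlier claim */
            return -1;
        }
        return 0;
    }
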
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
deleted file mode 100644
index aefd74c0d862..000000000000
--- a/drivers/ide/ide-probe.c
+++ /dev/null
@@ -1,1623 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyright (C) 2005, 2007 Bartlomiej Zolnierkiewicz
- */
-
-/*
- * Mostly written by Mark Lord <mlord@pobox.com>
- * and Gadi Oxman <gadio@netvision.net.il>
- * and Andre Hedrick <andre@linux-ide.org>
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This is the IDE probe module, as evolved from hd.c and ide.c.
- *
- * -- increase WAIT_PIDENTIFY to avoid CD-ROM locking at boot
- * by Andrea Arcangeli
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/spinlock.h>
-#include <linux/kmod.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-
-/**
- * generic_id - add a generic drive id
- * @drive: drive to make an ID block for
- *
- * Add a fake id field to the drive we are passed. This allows
- * us to skip a ton of NULL checks (which people always miss)
- * and makes drive properties unconditional outside of this file.
- */
-
-static void generic_id(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- id[ATA_ID_CUR_CYLS] = id[ATA_ID_CYLS] = drive->cyl;
- id[ATA_ID_CUR_HEADS] = id[ATA_ID_HEADS] = drive->head;
- id[ATA_ID_CUR_SECTORS] = id[ATA_ID_SECTORS] = drive->sect;
-}
-
-static void ide_disk_init_chs(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- /* Extract geometry if we did not already have one for the drive */
- if (!drive->cyl || !drive->head || !drive->sect) {
- drive->cyl = drive->bios_cyl = id[ATA_ID_CYLS];
- drive->head = drive->bios_head = id[ATA_ID_HEADS];
- drive->sect = drive->bios_sect = id[ATA_ID_SECTORS];
- }
-
- /* Handle logical geometry translation by the drive */
- if (ata_id_current_chs_valid(id)) {
- drive->cyl = id[ATA_ID_CUR_CYLS];
- drive->head = id[ATA_ID_CUR_HEADS];
- drive->sect = id[ATA_ID_CUR_SECTORS];
- }
-
- /* Use physical geometry if what we have still makes no sense */
- if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) {
- drive->cyl = id[ATA_ID_CYLS];
- drive->head = id[ATA_ID_HEADS];
- drive->sect = id[ATA_ID_SECTORS];
- }
-}
-
-static void ide_disk_init_mult_count(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- u8 max_multsect = id[ATA_ID_MAX_MULTSECT] & 0xff;
-
- if (max_multsect) {
- if ((max_multsect / 2) > 1)
- id[ATA_ID_MULTSECT] = max_multsect | 0x100;
- else
- id[ATA_ID_MULTSECT] &= ~0x1ff;
-
- drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
-
- if (drive->mult_req)
- drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
- }
-}
-
-static void ide_classify_ata_dev(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- char *m = (char *)&id[ATA_ID_PROD];
- int is_cfa = ata_id_is_cfa(id);
-
- /* CF devices are *not* removable in Linux definition of the term */
- if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7)))
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
-
- drive->media = ide_disk;
-
- if (!ata_id_has_unload(drive->id))
- drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
-
- printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m,
- is_cfa ? "CFA" : "ATA");
-}
-
-static void ide_classify_atapi_dev(ide_drive_t *drive)
-{
- u16 *id = drive->id;
- char *m = (char *)&id[ATA_ID_PROD];
- u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f;
-
- printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m);
- switch (type) {
- case ide_floppy:
- if (!strstr(m, "CD-ROM")) {
- if (!strstr(m, "oppy") &&
- !strstr(m, "poyp") &&
- !strstr(m, "ZIP"))
- printk(KERN_CONT "cdrom or floppy?, assuming ");
- if (drive->media != ide_cdrom) {
- printk(KERN_CONT "FLOPPY");
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
- break;
- }
- }
- /* Early cdrom models used zero */
- type = ide_cdrom;
- fallthrough;
- case ide_cdrom:
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
-#ifdef CONFIG_PPC
- /* kludge for Apple PowerBook internal zip */
- if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) {
- printk(KERN_CONT "FLOPPY");
- type = ide_floppy;
- break;
- }
-#endif
- printk(KERN_CONT "CD/DVD-ROM");
- break;
- case ide_tape:
- printk(KERN_CONT "TAPE");
- break;
- case ide_optical:
- printk(KERN_CONT "OPTICAL");
- drive->dev_flags |= IDE_DFLAG_REMOVABLE;
- break;
- default:
- printk(KERN_CONT "UNKNOWN (type %d)", type);
- break;
- }
-
- printk(KERN_CONT " drive\n");
- drive->media = type;
- /* an ATAPI device ignores DRDY */
- drive->ready_stat = 0;
- if (ata_id_cdb_intr(id))
- drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
- drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
- /* we don't do head unloading on ATAPI devices */
- drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
-}
-
-/**
- * do_identify - identify a drive
- * @drive: drive to identify
- * @cmd: command used
- * @id: buffer for IDENTIFY data
- *
- * Called when we have issued a drive identify command to
- * read and parse the results. This function is run with
- * interrupts disabled.
- */
-
-static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
-{
- ide_hwif_t *hwif = drive->hwif;
- char *m = (char *)&id[ATA_ID_PROD];
- unsigned long flags;
- int bswap = 1;
-
- /* local CPU only; some systems need this */
- local_irq_save(flags);
- /* read 512 bytes of id info */
- hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- local_irq_restore(flags);
-
- drive->dev_flags |= IDE_DFLAG_ID_READ;
-#ifdef DEBUG
- printk(KERN_INFO "%s: dumping identify data\n", drive->name);
- ide_dump_identify((u8 *)id);
-#endif
- ide_fix_driveid(id);
-
- /*
- * ATA_CMD_ID_ATA returns little-endian info,
- * ATA_CMD_ID_ATAPI *usually* returns little-endian info.
- */
- if (cmd == ATA_CMD_ID_ATAPI) {
- if ((m[0] == 'N' && m[1] == 'E') || /* NEC */
- (m[0] == 'F' && m[1] == 'X') || /* Mitsumi */
- (m[0] == 'P' && m[1] == 'i')) /* Pioneer */
- /* Vertos drives may still be weird */
- bswap ^= 1;
- }
-
- ide_fixstring(m, ATA_ID_PROD_LEN, bswap);
- ide_fixstring((char *)&id[ATA_ID_FW_REV], ATA_ID_FW_REV_LEN, bswap);
- ide_fixstring((char *)&id[ATA_ID_SERNO], ATA_ID_SERNO_LEN, bswap);
-
- /* we depend on this a lot! */
- m[ATA_ID_PROD_LEN - 1] = '\0';
-
- if (strstr(m, "E X A B Y T E N E S T"))
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
- else
- drive->dev_flags |= IDE_DFLAG_PRESENT;
-}
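
ide_fixstring(), which do_identify() calls but which lives elsewhere in the IDE core, fixes up the IDENTIFY strings; the essential operation is a word-wise byte swap, since ATA stores two ASCII characters per 16-bit word and little-endian hosts read those words with the bytes reversed. A standalone sketch of that swap plus a simple trailing-space trim (illustrative only, not the kernel helper):

    #include <stddef.h>

    /* Word-wise byte swap for an ATA IDENTIFY string: each 16-bit word holds
     * two ASCII characters, so adjacent bytes are exchanged. len must be even. */
    static void fix_ata_string(char *s, size_t len)
    {
        size_t i;

        for (i = 0; i + 1 < len; i += 2) {
            char tmp = s[i];

            s[i] = s[i + 1];
            s[i + 1] = tmp;
        }
        /* Trim trailing spaces (the real helper also handles padding/NULs). */
        while (len > 0 && s[len - 1] == ' ')
            s[--len] = '\0';
    }
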
-
-/**
- * ide_dev_read_id - send ATA/ATAPI IDENTIFY command
- * @drive: drive to identify
- * @cmd: command to use
- * @id: buffer for IDENTIFY data
- * @irq_ctx: flag set when called from the IRQ context
- *
- * Sends an ATA(PI) IDENTIFY request to a drive and waits for a response.
- *
- * Returns: 0 device was identified
- * 1 device timed-out (no response to identify request)
- * 2 device aborted the command (refused to identify itself)
- */
-
-int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id, int irq_ctx)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- int use_altstatus = 0, rc;
- unsigned long timeout;
- u8 s = 0, a = 0;
-
- /*
- * Disable device IRQ. Otherwise we'll get spurious interrupts
- * during the identify phase that the IRQ handler isn't expecting.
- */
- if (io_ports->ctl_addr)
- tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);
-
- /* take a deep breath */
- if (irq_ctx)
- mdelay(50);
- else
- msleep(50);
-
- if (io_ports->ctl_addr &&
- (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
- a = tp_ops->read_altstatus(hwif);
- s = tp_ops->read_status(hwif);
- if ((a ^ s) & ~ATA_SENSE)
- /* ancient Seagate drives, broken interfaces */
- printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
- "instead of ALTSTATUS(0x%02x)\n",
- drive->name, s, a);
- else
- /* use non-intrusive polling */
- use_altstatus = 1;
- }
-
- /* set the Features register for the ATAPI
- * identify command to be sure of a reply
- */
- if (cmd == ATA_CMD_ID_ATAPI) {
- struct ide_taskfile tf;
-
- memset(&tf, 0, sizeof(tf));
- /* disable DMA & overlap */
- tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE);
- }
-
- /* ask drive for ID */
- tp_ops->exec_command(hwif, cmd);
-
- timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
-
- /* wait for IRQ and ATA_DRQ */
- if (irq_ctx) {
- rc = __ide_wait_stat(drive, ATA_DRQ, BAD_R_STAT, timeout, &s);
- if (rc)
- return 1;
- } else {
- rc = ide_busy_sleep(drive, timeout, use_altstatus);
- if (rc)
- return 1;
-
- msleep(50);
- s = tp_ops->read_status(hwif);
- }
-
- if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
- /* drive returned ID */
- do_identify(drive, cmd, id);
- /* drive responded with ID */
- rc = 0;
- /* clear drive IRQ */
- (void)tp_ops->read_status(hwif);
- } else {
- /* drive refused ID */
- rc = 2;
- }
- return rc;
-}
-
-int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 stat;
-
- timeout += jiffies;
-
- do {
- msleep(50); /* give drive a breather */
- stat = altstatus ? hwif->tp_ops->read_altstatus(hwif)
- : hwif->tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0)
- return 0;
- } while (time_before(jiffies, timeout));
-
- printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);
-
- return 1; /* drive timed-out */
-}
-
-static u8 ide_read_device(ide_drive_t *drive)
-{
- struct ide_taskfile tf;
-
- drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE);
-
- return tf.device;
-}
-
-/**
- * do_probe - probe an IDE device
- * @drive: drive to probe
- * @cmd: command to use
- *
- * do_probe() has the difficult job of finding a drive if it exists,
- * without getting hung up if it doesn't exist, without trampling on
- * ethernet cards, and without leaving any IRQs dangling to haunt us later.
- *
- * If a drive is "known" to exist (from CMOS or kernel parameters),
- * but does not respond right away, the probe will "hang in there"
- * for the maximum wait time (about 30 seconds); otherwise it will
- * exit much more quickly.
- *
- * Returns: 0 device was identified
- * 1 device timed-out (no response to identify request)
- * 2 device aborted the command (refused to identify itself)
- * 3 bad status from device (possible for ATAPI drives)
- * 4 probe was not attempted because failure was obvious
- */
-
-static int do_probe (ide_drive_t *drive, u8 cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- u16 *id = drive->id;
- int rc;
- u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;
-
- /* avoid waiting for inappropriate probes */
- if (present && drive->media != ide_disk && cmd == ATA_CMD_ID_ATA)
- return 4;
-
-#ifdef DEBUG
- printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
- drive->name, present, drive->media,
- (cmd == ATA_CMD_ID_ATA) ? "ATA" : "ATAPI");
-#endif
-
- /* needed for some systems
- * (e.g. crw9624 as drive0 with disk as slave)
- */
- msleep(50);
- tp_ops->dev_select(drive);
- msleep(50);
-
- if (ide_read_device(drive) != drive->select && present == 0) {
- if (drive->dn & 1) {
- /* exit with drive0 selected */
- tp_ops->dev_select(hwif->devices[0]);
- /* allow ATA_BUSY to assert & clear */
- msleep(50);
- }
- /* no i/f present: mmm.. this should be a 4 -ml */
- return 3;
- }
-
- stat = tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
- present || cmd == ATA_CMD_ID_ATAPI) {
- rc = ide_dev_read_id(drive, cmd, id, 0);
- if (rc)
- /* failed: try again */
- rc = ide_dev_read_id(drive, cmd, id, 0);
-
- stat = tp_ops->read_status(hwif);
-
- if (stat == (ATA_BUSY | ATA_DRDY))
- return 4;
-
- if (rc == 1 && cmd == ATA_CMD_ID_ATAPI) {
- printk(KERN_ERR "%s: no response (status = 0x%02x), "
- "resetting drive\n", drive->name, stat);
- msleep(50);
- tp_ops->dev_select(drive);
- msleep(50);
- tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
- (void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
- rc = ide_dev_read_id(drive, cmd, id, 0);
- }
-
- /* ensure drive IRQ is clear */
- stat = tp_ops->read_status(hwif);
-
- if (rc == 1)
- printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
- drive->name, stat);
- } else {
- /* not present or maybe ATAPI */
- rc = 3;
- }
- if (drive->dn & 1) {
- /* exit with drive0 selected */
- tp_ops->dev_select(hwif->devices[0]);
- msleep(50);
- /* ensure drive irq is clear */
- (void)tp_ops->read_status(hwif);
- }
- return rc;
-}
-
-/**
- * probe_for_drive - upper level drive probe
- * @drive: drive to probe for
- *
- * probe_for_drive() tests for existence of a given drive using do_probe()
- * and presents things to the user as needed.
- *
- * Returns: 0 no device was found
- * 1 device was found
- * (note: IDE_DFLAG_PRESENT might still not be set)
- */
-
-static u8 probe_for_drive(ide_drive_t *drive)
-{
- char *m;
- int rc;
- u8 cmd;
-
- drive->dev_flags &= ~IDE_DFLAG_ID_READ;
-
- m = (char *)&drive->id[ATA_ID_PROD];
- strcpy(m, "UNKNOWN");
-
- /* skip probing? */
- if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
- /* if !(success||timed-out) */
- cmd = ATA_CMD_ID_ATA;
- rc = do_probe(drive, cmd);
- if (rc >= 2) {
- /* look for ATAPI device */
- cmd = ATA_CMD_ID_ATAPI;
- rc = do_probe(drive, cmd);
- }
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return 0;
-
- /* identification failed? */
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
- if (drive->media == ide_disk) {
- printk(KERN_INFO "%s: non-IDE drive, CHS=%d/%d/%d\n",
- drive->name, drive->cyl,
- drive->head, drive->sect);
- } else if (drive->media == ide_cdrom) {
- printk(KERN_INFO "%s: ATAPI cdrom (?)\n", drive->name);
- } else {
- /* nuke it */
- printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n", drive->name);
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
- }
- } else {
- if (cmd == ATA_CMD_ID_ATAPI)
- ide_classify_atapi_dev(drive);
- else
- ide_classify_ata_dev(drive);
- }
- }
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return 0;
-
- /* The drive wasn't being helpful. Add generic info only */
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
- generic_id(drive);
- return 1;
- }
-
- if (drive->media == ide_disk) {
- ide_disk_init_chs(drive);
- ide_disk_init_mult_count(drive);
- }
-
- return 1;
-}
-
-static void hwif_release_dev(struct device *dev)
-{
- ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
-
- complete(&hwif->gendev_rel_comp);
-}
-
-static int ide_register_port(ide_hwif_t *hwif)
-{
- int ret;
-
- /* register with global device tree */
- dev_set_name(&hwif->gendev, "%s", hwif->name);
- dev_set_drvdata(&hwif->gendev, hwif);
- if (hwif->gendev.parent == NULL)
- hwif->gendev.parent = hwif->dev;
- hwif->gendev.release = hwif_release_dev;
-
- ret = device_register(&hwif->gendev);
- if (ret < 0) {
- printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
- __func__, ret);
- goto out;
- }
-
- hwif->portdev = device_create(ide_port_class, &hwif->gendev,
- MKDEV(0, 0), hwif, "%s", hwif->name);
- if (IS_ERR(hwif->portdev)) {
- ret = PTR_ERR(hwif->portdev);
- device_unregister(&hwif->gendev);
- }
-out:
- return ret;
-}
-
-/**
- * ide_port_wait_ready - wait for port to become ready
- * @hwif: IDE port
- *
- * This is needed on some PPCs and a bunch of BIOS-less embedded
- * platforms. Typical cases are:
- *
- * - The firmware hard resets the disk before booting the kernel;
- * the drive is still doing its power-on reset sequence, which
- * can take up to 30 seconds.
- *
- * - The firmware does nothing (or there is no firmware); the device is
- * still in POST state (same as above, actually).
- *
- * - Some CD/DVD/Writer combo drives tend to drive the bus during
- * their reset sequence even when they are non-selected slave
- * devices, thus preventing discovery of the main HD.
- *
- * Doing this wait-for-non-busy should not harm any existing
- * configuration and fixes some issues like the above.
- *
- * BenH.
- *
- * Returns 0 on success, error code (< 0) otherwise.
- */
-
-static int ide_port_wait_ready(ide_hwif_t *hwif)
-{
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- ide_drive_t *drive;
- int i, rc;
-
- printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name);
-
- /* Let HW settle down a bit from whatever init state we
- * come from */
- mdelay(2);
-
- /* Wait for the BSY bit to go away. The spec timeout is 30 seconds;
- * at least one known disk takes 31 seconds, so use 35 here to be
- * safe.
- */
- rc = ide_wait_not_busy(hwif, 35000);
- if (rc)
- return rc;
-
- /* Now make sure both master & slave are ready */
- ide_port_for_each_dev(i, drive, hwif) {
- /* Ignore disks that we will not probe for later. */
- if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0 ||
- (drive->dev_flags & IDE_DFLAG_PRESENT)) {
- tp_ops->dev_select(drive);
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- mdelay(2);
- rc = ide_wait_not_busy(hwif, 35000);
- if (rc)
- goto out;
- } else
- printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n",
- drive->name);
- }
-out:
- /* Exit function with master reselected (let's be sane) */
- if (i)
- tp_ops->dev_select(hwif->devices[0]);
-
- return rc;
-}
-
-/**
- * ide_undecoded_slave - look for bad CF adapters
- * @dev1: slave device
- *
- * Analyse the drives on the interface and attempt to decide if we
- * have the same drive viewed twice. This occurs with crap CF adapters
- * and PCMCIA sometimes.
- */
-
-void ide_undecoded_slave(ide_drive_t *dev1)
-{
- ide_drive_t *dev0 = dev1->hwif->devices[0];
-
- if ((dev1->dn & 1) == 0 || (dev0->dev_flags & IDE_DFLAG_PRESENT) == 0)
- return;
-
- /* If the models don't match they are not the same product */
- if (strcmp((char *)&dev0->id[ATA_ID_PROD],
- (char *)&dev1->id[ATA_ID_PROD]))
- return;
-
- /* Serial numbers do not match */
- if (strncmp((char *)&dev0->id[ATA_ID_SERNO],
- (char *)&dev1->id[ATA_ID_SERNO], ATA_ID_SERNO_LEN))
- return;
-
- /* No serial number, thankfully very rare for CF */
- if (*(char *)&dev0->id[ATA_ID_SERNO] == 0)
- return;
-
- /* Appears to be an IDE flash adapter with decode bugs */
- printk(KERN_WARNING "ide-probe: ignoring undecoded slave\n");
-
- dev1->dev_flags &= ~IDE_DFLAG_PRESENT;
-}
-
-EXPORT_SYMBOL_GPL(ide_undecoded_slave);
-
-static int ide_probe_port(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- unsigned int irqd;
- int i, rc = -ENODEV;
-
- BUG_ON(hwif->present);
-
- if ((hwif->devices[0]->dev_flags & IDE_DFLAG_NOPROBE) &&
- (hwif->devices[1]->dev_flags & IDE_DFLAG_NOPROBE))
- return -EACCES;
-
- /*
- * We must always disable the IRQ, as probe_for_drive() will assert the
- * IRQ, but we'll install our IRQ handler much later...
- */
- irqd = hwif->irq;
- if (irqd)
- disable_irq(hwif->irq);
-
- if (ide_port_wait_ready(hwif) == -EBUSY)
- printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
-
- /*
- * Second drive should only exist if first drive was found,
- * but a lot of cdrom drives are configured as single slaves.
- */
- ide_port_for_each_dev(i, drive, hwif) {
- (void) probe_for_drive(drive);
- if (drive->dev_flags & IDE_DFLAG_PRESENT)
- rc = 0;
- }
-
- /*
- * Use cached IRQ number. It might be (and is...) changed by probe
- * code above
- */
- if (irqd)
- enable_irq(irqd);
-
- return rc;
-}
-
-static void ide_port_tune_devices(ide_hwif_t *hwif)
-{
- const struct ide_port_ops *port_ops = hwif->port_ops;
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- ide_check_nien_quirk_list(drive);
-
- if (port_ops && port_ops->quirkproc)
- port_ops->quirkproc(drive);
- }
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- ide_set_max_pio(drive);
-
- drive->dev_flags |= IDE_DFLAG_NICE1;
-
- if (hwif->dma_ops)
- ide_set_dma(drive);
- }
-}
-
-static void ide_initialize_rq(struct request *rq)
-{
- struct ide_request *req = blk_mq_rq_to_pdu(rq);
-
- req->special = NULL;
- scsi_req_init(&req->sreq);
- req->sreq.sense = req->sense;
-}
-
-static const struct blk_mq_ops ide_mq_ops = {
- .queue_rq = ide_queue_rq,
- .initialize_rq_fn = ide_initialize_rq,
-};
-
-/*
- * init request queue
- */
-static int ide_init_queue(ide_drive_t *drive)
-{
- struct request_queue *q;
- ide_hwif_t *hwif = drive->hwif;
- int max_sectors = 256;
- int max_sg_entries = PRD_ENTRIES;
- struct blk_mq_tag_set *set;
-
- /*
- * Our default setup assumes the normal IDE case,
- * that is 64K segmenting, standard PRD setup
- * and LBA28. Some drivers then impose their own
- * limits; for LBA48 we could raise the limit, but
- * as yet we do not.
- */
-
- set = &drive->tag_set;
- set->ops = &ide_mq_ops;
- set->nr_hw_queues = 1;
- set->queue_depth = 32;
- set->reserved_tags = 1;
- set->cmd_size = sizeof(struct ide_request);
- set->numa_node = hwif_to_node(hwif);
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- if (blk_mq_alloc_tag_set(set))
- return 1;
-
- q = blk_mq_init_queue(set);
- if (IS_ERR(q)) {
- blk_mq_free_tag_set(set);
- return 1;
- }
-
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-
- q->queuedata = drive;
- blk_queue_segment_boundary(q, 0xffff);
-
- if (hwif->rqsize < max_sectors)
- max_sectors = hwif->rqsize;
- blk_queue_max_hw_sectors(q, max_sectors);
-
-#ifdef CONFIG_PCI
- /* When we have an IOMMU, we may have a problem where pci_map_sg()
- * creates segments that don't completely match our boundary
- * requirements and thus need to be broken up again. Because it
- * doesn't align properly either, we may actually have to break up
- * into more segments than we got in the first place; the worst
- * case is twice as many.
- * This will be fixed once we teach pci_map_sg() about our boundary
- * requirements, hopefully soon. *FIXME*
- */
- max_sg_entries >>= 1;
-#endif /* CONFIG_PCI */
-
- blk_queue_max_segments(q, max_sg_entries);
-
- /* assign drive queue */
- drive->queue = q;
-
- return 0;
-}
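
Stripped of the IDE-specific limits, the blk-mq setup in ide_init_queue() reduces to: fill in a blk_mq_tag_set, allocate it, create a queue from it, and free the set again if queue creation fails. A condensed sketch of that sequence, assuming the same kernel interfaces used above (error handling and queue limits trimmed):

    #include <linux/blk-mq.h>
    #include <linux/err.h>
    #include <linux/string.h>

    /* Minimal blk-mq bring-up mirroring ide_init_queue() above (sketch only). */
    static struct request_queue *example_init_queue(struct blk_mq_tag_set *set,
                                                    const struct blk_mq_ops *ops,
                                                    unsigned int cmd_size)
    {
        struct request_queue *q;

        memset(set, 0, sizeof(*set));
        set->ops = ops;
        set->nr_hw_queues = 1;
        set->queue_depth = 32;
        set->cmd_size = cmd_size;
        set->flags = BLK_MQ_F_SHOULD_MERGE;

        if (blk_mq_alloc_tag_set(set))
            return NULL;

        q = blk_mq_init_queue(set);
        if (IS_ERR(q)) {
            blk_mq_free_tag_set(set);    /* undo the tag set allocation */
            return NULL;
        }
        return q;
    }
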
-
-static DEFINE_MUTEX(ide_cfg_mtx);
-
-/*
- * For any present drive:
- * - allocate the block device queue
- */
-static int ide_port_setup_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i, j = 0;
-
- mutex_lock(&ide_cfg_mtx);
- ide_port_for_each_present_dev(i, drive, hwif) {
- if (ide_init_queue(drive)) {
- printk(KERN_ERR "ide: failed to init %s\n",
- drive->name);
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
- continue;
- }
-
- j++;
- }
- mutex_unlock(&ide_cfg_mtx);
-
- return j;
-}
-
-static void ide_host_enable_irqs(struct ide_host *host)
-{
- ide_hwif_t *hwif;
- int i;
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- /* clear any pending IRQs */
- hwif->tp_ops->read_status(hwif);
-
- /* unmask IRQs */
- if (hwif->io_ports.ctl_addr)
- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
- }
-}
-
-/*
- * This routine sets up the IRQ for an IDE interface.
- */
-static int init_irq (ide_hwif_t *hwif)
-{
- struct ide_io_ports *io_ports = &hwif->io_ports;
- struct ide_host *host = hwif->host;
- irq_handler_t irq_handler = host->irq_handler;
- int sa = host->irq_flags;
-
- if (irq_handler == NULL)
- irq_handler = ide_intr;
-
- if (!host->get_lock)
- if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
- goto out_up;
-
-#if !defined(__mc68000__)
- printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
- io_ports->data_addr, io_ports->status_addr,
- io_ports->ctl_addr, hwif->irq);
-#else
- printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
- io_ports->data_addr, hwif->irq);
-#endif /* __mc68000__ */
- if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
- printk(KERN_CONT " (serialized)");
- printk(KERN_CONT "\n");
-
- return 0;
-out_up:
- return 1;
-}
-
-static void ata_probe(dev_t dev)
-{
- request_module("ide-disk");
- request_module("ide-cd");
- request_module("ide-tape");
- request_module("ide-floppy");
-}
-
-void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int unit = drive->dn & 1;
-
- disk->major = hwif->major;
- disk->first_minor = unit << PARTN_BITS;
- sprintf(disk->disk_name, "hd%c", 'a' + hwif->index * MAX_DRIVES + unit);
- disk->queue = drive->queue;
-}
-
-EXPORT_SYMBOL_GPL(ide_init_disk);
-
-static void drive_release_dev (struct device *dev)
-{
- ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
-
- ide_proc_unregister_device(drive);
-
- if (drive->sense_rq)
- blk_mq_free_request(drive->sense_rq);
-
- blk_cleanup_queue(drive->queue);
- drive->queue = NULL;
- blk_mq_free_tag_set(&drive->tag_set);
-
- drive->dev_flags &= ~IDE_DFLAG_PRESENT;
-
- complete(&drive->gendev_rel_comp);
-}
-
-static int hwif_init(ide_hwif_t *hwif)
-{
- if (!hwif->irq) {
- printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
- return 0;
- }
-
- if (__register_blkdev(hwif->major, hwif->name, ata_probe))
- return 0;
-
- if (!hwif->sg_max_nents)
- hwif->sg_max_nents = PRD_ENTRIES;
-
- hwif->sg_table = kmalloc_array(hwif->sg_max_nents,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!hwif->sg_table) {
- printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
- goto out;
- }
-
- sg_init_table(hwif->sg_table, hwif->sg_max_nents);
-
- if (init_irq(hwif)) {
- printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
- hwif->name, hwif->irq);
- goto out;
- }
-
- return 1;
-
-out:
- unregister_blkdev(hwif->major, hwif->name);
- return 0;
-}
-
-static void hwif_register_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- unsigned int i;
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- struct device *dev = &drive->gendev;
- int ret;
-
- dev_set_name(dev, "%u.%u", hwif->index, i);
- dev_set_drvdata(dev, drive);
- dev->parent = &hwif->gendev;
- dev->bus = &ide_bus_type;
- dev->release = drive_release_dev;
-
- ret = device_register(dev);
- if (ret < 0)
- printk(KERN_WARNING "IDE: %s: device_register error: "
- "%d\n", __func__, ret);
- }
-}
-
-static void ide_port_init_devices(ide_hwif_t *hwif)
-{
- const struct ide_port_ops *port_ops = hwif->port_ops;
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- drive->dn = i + hwif->channel * 2;
-
- if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
- drive->io_32bit = 1;
- if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
- drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
- if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
- drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
-
- drive->pio_mode = XFER_PIO_0;
-
- if (port_ops && port_ops->init_dev)
- port_ops->init_dev(drive);
- }
-}
-
-static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
- const struct ide_port_info *d)
-{
- hwif->channel = port;
-
- hwif->chipset = d->chipset ? d->chipset : ide_pci;
-
- if (d->init_iops)
- d->init_iops(hwif);
-
- /* ->host_flags may be set by ->init_iops (or even earlier...) */
- hwif->host_flags |= d->host_flags;
- hwif->pio_mask = d->pio_mask;
-
- if (d->tp_ops)
- hwif->tp_ops = d->tp_ops;
-
- /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
- if ((hwif->host_flags & IDE_HFLAG_DTC2278) == 0 || hwif->channel == 0)
- hwif->port_ops = d->port_ops;
-
- hwif->swdma_mask = d->swdma_mask;
- hwif->mwdma_mask = d->mwdma_mask;
- hwif->ultra_mask = d->udma_mask;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- int rc;
-
- hwif->dma_ops = d->dma_ops;
-
- if (d->init_dma)
- rc = d->init_dma(hwif, d);
- else
- rc = ide_hwif_setup_dma(hwif, d);
-
- if (rc < 0) {
- printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
-
- hwif->dma_ops = NULL;
- hwif->dma_base = 0;
- hwif->swdma_mask = 0;
- hwif->mwdma_mask = 0;
- hwif->ultra_mask = 0;
- }
- }
-
- if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
- ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
- hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
-
- if (d->max_sectors)
- hwif->rqsize = d->max_sectors;
- else {
- if ((hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
- (hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA))
- hwif->rqsize = 256;
- else
- hwif->rqsize = 65536;
- }
-
- /* call chipset specific routine for each enabled port */
- if (d->init_hwif)
- d->init_hwif(hwif);
-}
-
-static void ide_port_cable_detect(ide_hwif_t *hwif)
-{
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
- if (hwif->cbl != ATA_CBL_PATA40_SHORT)
- hwif->cbl = port_ops->cable_detect(hwif);
- }
-}
-
-/*
- * Deferred request list insertion handler
- */
-static void drive_rq_insert_work(struct work_struct *work)
-{
- ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- blk_status_t ret;
- LIST_HEAD(list);
-
- blk_mq_quiesce_queue(drive->queue);
-
- ret = BLK_STS_OK;
- spin_lock_irq(&hwif->lock);
- while (!list_empty(&drive->rq_list)) {
- rq = list_first_entry(&drive->rq_list, struct request, queuelist);
- list_del_init(&rq->queuelist);
-
- spin_unlock_irq(&hwif->lock);
- ret = ide_issue_rq(drive, rq, true);
- spin_lock_irq(&hwif->lock);
- }
- spin_unlock_irq(&hwif->lock);
-
- blk_mq_unquiesce_queue(drive->queue);
-
- if (ret != BLK_STS_OK)
- kblockd_schedule_work(&drive->rq_work);
-}
-
-static const u8 ide_hwif_to_major[] =
- { IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
- IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };
-
-static void ide_port_init_devices_data(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- u8 j = (hwif->index * MAX_DRIVES) + i;
- u16 *saved_id = drive->id;
-
- memset(drive, 0, sizeof(*drive));
- memset(saved_id, 0, SECTOR_SIZE);
- drive->id = saved_id;
-
- drive->media = ide_disk;
- drive->select = (i << 4) | ATA_DEVICE_OBS;
- drive->hwif = hwif;
- drive->ready_stat = ATA_DRDY;
- drive->bad_wstat = BAD_W_STAT;
- drive->special_flags = IDE_SFLAG_RECALIBRATE |
- IDE_SFLAG_SET_GEOMETRY;
- drive->name[0] = 'h';
- drive->name[1] = 'd';
- drive->name[2] = 'a' + j;
- drive->max_failures = IDE_DEFAULT_MAX_FAILURES;
-
- INIT_LIST_HEAD(&drive->list);
- init_completion(&drive->gendev_rel_comp);
-
- INIT_WORK(&drive->rq_work, drive_rq_insert_work);
- INIT_LIST_HEAD(&drive->rq_list);
- }
-}
-
-static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
-{
- /* fill in any non-zero initial values */
- hwif->index = index;
- hwif->major = ide_hwif_to_major[index];
-
- hwif->name[0] = 'i';
- hwif->name[1] = 'd';
- hwif->name[2] = 'e';
- hwif->name[3] = '0' + index;
-
- spin_lock_init(&hwif->lock);
-
- timer_setup(&hwif->timer, ide_timer_expiry, 0);
-
- init_completion(&hwif->gendev_rel_comp);
-
- hwif->tp_ops = &default_tp_ops;
-
- ide_port_init_devices_data(hwif);
-}
-
-static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
-{
- memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
- hwif->irq = hw->irq;
- hwif->dev = hw->dev;
- hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
- hwif->config_data = hw->config;
-}
-
-static unsigned int ide_indexes;
-
-/**
- * ide_find_port_slot - find free port slot
- * @d: IDE port info
- *
- * Return the new port slot index or -ENOENT if we are out of free slots.
- */
-
-static int ide_find_port_slot(const struct ide_port_info *d)
-{
- int idx = -ENOENT;
- u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
- u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
-
- /*
- * Claim an unassigned slot.
- *
- * Give preference to claiming other slots before claiming ide0/ide1,
- * just in case there's another interface yet-to-be-scanned
- * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
- *
- * Unless there is a bootable card that does not use the standard
- * ports 0x1f0/0x170 (the ide0/ide1 defaults).
- */
- mutex_lock(&ide_cfg_mtx);
- if (bootable) {
- if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
- idx = ffz(ide_indexes | i);
- } else {
- if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
- idx = ffz(ide_indexes | 3);
- else if ((ide_indexes & 3) != 3)
- idx = ffz(ide_indexes);
- }
- if (idx >= 0)
- ide_indexes |= (1 << idx);
- mutex_unlock(&ide_cfg_mtx);
-
- return idx;
-}
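
ide_find_port_slot() is really a tiny bitmap allocator: ide_indexes holds one bit per port slot, ffz() returns the lowest clear bit, and the preference logic simply ORs in the bits it would rather not claim on the first attempt. A self-contained sketch of the same idea, using an open-coded find-first-zero loop instead of the kernel's ffz():

    /* Lowest clear bit in mask among the low max_slots bits, or -1 if none. */
    static int find_free_slot(unsigned int mask, unsigned int max_slots)
    {
        unsigned int i;

        for (i = 0; i < max_slots; i++)
            if (!(mask & (1u << i)))
                return (int)i;
        return -1;
    }

    /* Prefer slots above ide0/ide1 (bits 0-1) first, then fall back to any slot. */
    static int claim_slot(unsigned int *indexes, unsigned int max_slots)
    {
        int idx = find_free_slot(*indexes | 3u, max_slots);

        if (idx < 0)
            idx = find_free_slot(*indexes, max_slots);
        if (idx >= 0)
            *indexes |= 1u << (unsigned int)idx;
        return idx;
    }
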
-
-static void ide_free_port_slot(int idx)
-{
- mutex_lock(&ide_cfg_mtx);
- ide_indexes &= ~(1 << idx);
- mutex_unlock(&ide_cfg_mtx);
-}
-
-static void ide_port_free_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- kfree(drive->id);
- kfree(drive);
- }
-}
-
-static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
-{
- ide_drive_t *drive;
- int i;
-
- for (i = 0; i < MAX_DRIVES; i++) {
- drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
- if (drive == NULL)
- goto out_nomem;
-
- /*
- * In order to keep things simple we have an id
- * block for all drives at all times. If the device
- * is pre-ATA or refuses ATA/ATAPI identify, we
- * will add faked data to this.
- *
- * Also note that 0 everywhere means "can't do X"
- */
- drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
- if (drive->id == NULL)
- goto out_free_drive;
-
- hwif->devices[i] = drive;
- }
- return 0;
-
-out_free_drive:
- kfree(drive);
-out_nomem:
- ide_port_free_devices(hwif);
- return -ENOMEM;
-}
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *d,
- struct ide_hw **hws, unsigned int n_ports)
-{
- struct ide_host *host;
- struct device *dev = hws[0] ? hws[0]->dev : NULL;
- int node = dev ? dev_to_node(dev) : -1;
- int i;
-
- host = kzalloc_node(sizeof(*host), GFP_KERNEL, node);
- if (host == NULL)
- return NULL;
-
- for (i = 0; i < n_ports; i++) {
- ide_hwif_t *hwif;
- int idx;
-
- if (hws[i] == NULL)
- continue;
-
- hwif = kzalloc_node(sizeof(*hwif), GFP_KERNEL, node);
- if (hwif == NULL)
- continue;
-
- if (ide_port_alloc_devices(hwif, node) < 0) {
- kfree(hwif);
- continue;
- }
-
- idx = ide_find_port_slot(d);
- if (idx < 0) {
- printk(KERN_ERR "%s: no free slot for interface\n",
- d ? d->name : "ide");
- ide_port_free_devices(hwif);
- kfree(hwif);
- continue;
- }
-
- ide_init_port_data(hwif, idx);
-
- hwif->host = host;
-
- host->ports[i] = hwif;
- host->n_ports++;
- }
-
- if (host->n_ports == 0) {
- kfree(host);
- return NULL;
- }
-
- host->dev[0] = dev;
-
- if (d) {
- host->init_chipset = d->init_chipset;
- host->get_lock = d->get_lock;
- host->release_lock = d->release_lock;
- host->host_flags = d->host_flags;
- host->irq_flags = d->irq_flags;
- }
-
- return host;
-}
-EXPORT_SYMBOL_GPL(ide_host_alloc);
-
-static void ide_port_free(ide_hwif_t *hwif)
-{
- ide_port_free_devices(hwif);
- ide_free_port_slot(hwif->index);
- kfree(hwif);
-}
-
-static void ide_disable_port(ide_hwif_t *hwif)
-{
- struct ide_host *host = hwif->host;
- int i;
-
- printk(KERN_INFO "%s: disabling port\n", hwif->name);
-
- for (i = 0; i < MAX_HOST_PORTS; i++) {
- if (host->ports[i] == hwif) {
- host->ports[i] = NULL;
- host->n_ports--;
- }
- }
-
- ide_port_free(hwif);
-}
-
-int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
- struct ide_hw **hws)
-{
- ide_hwif_t *hwif, *mate = NULL;
- int i, j = 0;
-
- pr_warn("legacy IDE will be removed in 2021, please switch to libata\n"
- "Report any missing HW support to linux-ide@vger.kernel.org\n");
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL) {
- mate = NULL;
- continue;
- }
-
- ide_init_port_hw(hwif, hws[i]);
- ide_port_apply_params(hwif);
-
- if ((i & 1) && mate) {
- hwif->mate = mate;
- mate->mate = hwif;
- }
-
- mate = (i & 1) ? NULL : hwif;
-
- ide_init_port(hwif, i & 1, d);
- ide_port_cable_detect(hwif);
-
- hwif->port_flags |= IDE_PFLAG_PROBING;
-
- ide_port_init_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- if (ide_probe_port(hwif) == 0)
- hwif->present = 1;
-
- hwif->port_flags &= ~IDE_PFLAG_PROBING;
-
- if ((hwif->host_flags & IDE_HFLAG_4DRIVES) == 0 ||
- hwif->mate == NULL || hwif->mate->present == 0) {
- if (ide_register_port(hwif)) {
- ide_disable_port(hwif);
- continue;
- }
- }
-
- if (hwif->present)
- ide_port_tune_devices(hwif);
- }
-
- ide_host_enable_irqs(host);
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- if (hwif_init(hwif) == 0) {
- printk(KERN_INFO "%s: failed to initialize IDE "
- "interface\n", hwif->name);
- device_unregister(hwif->portdev);
- device_unregister(&hwif->gendev);
- ide_disable_port(hwif);
- continue;
- }
-
- if (hwif->present)
- if (ide_port_setup_devices(hwif) == 0) {
- hwif->present = 0;
- continue;
- }
-
- j++;
-
- ide_acpi_init_port(hwif);
-
- if (hwif->present)
- ide_acpi_port_init_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
- ide_sysfs_register_port(hwif);
- ide_proc_register_port(hwif);
-
- if (hwif->present) {
- ide_proc_port_register_devices(hwif);
- hwif_register_devices(hwif);
- }
- }
-
- return j ? 0 : -1;
-}
-EXPORT_SYMBOL_GPL(ide_host_register);
-
-int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
- unsigned int n_ports, struct ide_host **hostp)
-{
- struct ide_host *host;
- int rc;
-
- host = ide_host_alloc(d, hws, n_ports);
- if (host == NULL)
- return -ENOMEM;
-
- rc = ide_host_register(host, d, hws);
- if (rc) {
- ide_host_free(host);
- return rc;
- }
-
- if (hostp)
- *hostp = host;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_host_add);
-
-static void __ide_port_unregister_devices(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- ide_port_for_each_present_dev(i, drive, hwif) {
- device_unregister(&drive->gendev);
- wait_for_completion(&drive->gendev_rel_comp);
- }
-}
-
-void ide_port_unregister_devices(ide_hwif_t *hwif)
-{
- mutex_lock(&ide_cfg_mtx);
- __ide_port_unregister_devices(hwif);
- hwif->present = 0;
- ide_port_init_devices_data(hwif);
- mutex_unlock(&ide_cfg_mtx);
-}
-EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
-
-/**
- * ide_unregister - free an IDE interface
- * @hwif: IDE interface
- *
- * Perform the final unregister of an IDE interface.
- *
- * Locking:
- * The caller must not hold the IDE locks.
- *
- * It is up to the caller to be sure there is no pending I/O here,
- * and that the interface will not be reopened (present/vanishing
- * locking isn't yet done BTW).
- */
-
-static void ide_unregister(ide_hwif_t *hwif)
-{
- mutex_lock(&ide_cfg_mtx);
-
- if (hwif->present) {
- __ide_port_unregister_devices(hwif);
- hwif->present = 0;
- }
-
- ide_proc_unregister_port(hwif);
-
- if (!hwif->host->get_lock)
- free_irq(hwif->irq, hwif);
-
- device_unregister(hwif->portdev);
- device_unregister(&hwif->gendev);
- wait_for_completion(&hwif->gendev_rel_comp);
-
- /*
- * Remove us from the kernel's knowledge
- */
- kfree(hwif->sg_table);
- unregister_blkdev(hwif->major, hwif->name);
-
- ide_release_dma_engine(hwif);
-
- mutex_unlock(&ide_cfg_mtx);
-}
-
-void ide_host_free(struct ide_host *host)
-{
- ide_hwif_t *hwif;
- int i;
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif)
- ide_port_free(hwif);
- }
-
- kfree(host);
-}
-EXPORT_SYMBOL_GPL(ide_host_free);
-
-void ide_host_remove(struct ide_host *host)
-{
- ide_hwif_t *hwif;
- int i;
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif)
- ide_unregister(hwif);
- }
-
- ide_host_free(host);
-}
-EXPORT_SYMBOL_GPL(ide_host_remove);
-
-void ide_port_scan(ide_hwif_t *hwif)
-{
- int rc;
-
- ide_port_apply_params(hwif);
- ide_port_cable_detect(hwif);
-
- hwif->port_flags |= IDE_PFLAG_PROBING;
-
- ide_port_init_devices(hwif);
-
- rc = ide_probe_port(hwif);
-
- hwif->port_flags &= ~IDE_PFLAG_PROBING;
-
- if (rc < 0)
- return;
-
- hwif->present = 1;
-
- ide_port_tune_devices(hwif);
- ide_port_setup_devices(hwif);
- ide_acpi_port_init_devices(hwif);
- hwif_register_devices(hwif);
- ide_proc_port_register_devices(hwif);
-}
-EXPORT_SYMBOL_GPL(ide_port_scan);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
deleted file mode 100644
index 15c17f3781ee..000000000000
--- a/drivers/ide/ide-proc.c
+++ /dev/null
@@ -1,633 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1997-1998 Mark Lord
- * Copyright (C) 2003 Red Hat
- *
- * Some code was moved here from ide.c, see it for original copyrights.
- */
-
-/*
- * This is the /proc/ide/ filesystem implementation.
- *
- * Drive/Driver settings can be retrieved by reading the drive's
- * "settings" files. e.g. "cat /proc/ide0/hda/settings"
- * To write a new value "val" into a specific setting "name", use:
- * echo "name:val" >/proc/ide/ide0/hda/settings
- */
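
From userspace, the write interface described above just needs a "name:val" string written to the drive's settings file. A small illustrative program (the pio_mode name and the value 4 are only examples; root privileges are required):

    #include <stdio.h>

    /* Write one "name:val" setting to a drive's settings file. */
    int main(void)
    {
        FILE *f = fopen("/proc/ide/ide0/hda/settings", "w");

        if (!f) {
            perror("open settings");
            return 1;
        }
        if (fprintf(f, "pio_mode:4\n") < 0)
            perror("write setting");
        fclose(f);
        return 0;
    }
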
-
-#include <linux/module.h>
-
-#include <linux/uaccess.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/ctype.h>
-#include <linux/ide.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-
-static struct proc_dir_entry *proc_ide_root;
-
-static int ide_imodel_proc_show(struct seq_file *m, void *v)
-{
- ide_hwif_t *hwif = (ide_hwif_t *) m->private;
- const char *name;
-
- switch (hwif->chipset) {
- case ide_generic: name = "generic"; break;
- case ide_pci: name = "pci"; break;
- case ide_cmd640: name = "cmd640"; break;
- case ide_dtc2278: name = "dtc2278"; break;
- case ide_ali14xx: name = "ali14xx"; break;
- case ide_qd65xx: name = "qd65xx"; break;
- case ide_umc8672: name = "umc8672"; break;
- case ide_ht6560b: name = "ht6560b"; break;
- case ide_4drives: name = "4drives"; break;
- case ide_pmac: name = "mac-io"; break;
- case ide_au1xxx: name = "au1xxx"; break;
- case ide_palm3710: name = "palm3710"; break;
- case ide_acorn: name = "acorn"; break;
- default: name = "(unknown)"; break;
- }
- seq_printf(m, "%s\n", name);
- return 0;
-}
-
-static int ide_mate_proc_show(struct seq_file *m, void *v)
-{
- ide_hwif_t *hwif = (ide_hwif_t *) m->private;
-
- if (hwif && hwif->mate)
- seq_printf(m, "%s\n", hwif->mate->name);
- else
- seq_printf(m, "(none)\n");
- return 0;
-}
-
-static int ide_channel_proc_show(struct seq_file *m, void *v)
-{
- ide_hwif_t *hwif = (ide_hwif_t *) m->private;
-
- seq_printf(m, "%c\n", hwif->channel ? '1' : '0');
- return 0;
-}
-
-static int ide_identify_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *)m->private;
- u8 *buf;
-
- if (!drive) {
- seq_putc(m, '\n');
- return 0;
- }
-
- buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (taskfile_lib_get_identify(drive, buf) == 0) {
- __le16 *val = (__le16 *)buf;
- int i;
-
- for (i = 0; i < SECTOR_SIZE / 2; i++) {
- seq_printf(m, "%04x%c", le16_to_cpu(val[i]),
- (i % 8) == 7 ? '\n' : ' ');
- }
- } else
- seq_putc(m, buf[0]);
- kfree(buf);
- return 0;
-}
-
-/**
- * ide_find_setting - find a specific setting
- * @st: setting table pointer
- * @name: setting name
- *
- * Scans the setting table for a matching entry and returns
- * this or NULL if no entry is found. The caller must hold the
- * setting semaphore
- */
-
-static
-const struct ide_proc_devset *ide_find_setting(const struct ide_proc_devset *st,
- char *name)
-{
- while (st->name) {
- if (strcmp(st->name, name) == 0)
- break;
- st++;
- }
- return st->name ? st : NULL;
-}
-
-/**
- * ide_read_setting - read an IDE setting
- * @drive: drive to read from
- * @setting: drive setting
- *
- * Read a drive setting and return the value. The caller
- * must hold the ide_setting_mtx when making this call.
- *
- * BUGS: the data and the error share the same return value,
- * so an error of -EINVAL and a legitimate return of the same
- * value cannot be told apart.
- */
-
-static int ide_read_setting(ide_drive_t *drive,
- const struct ide_proc_devset *setting)
-{
- const struct ide_devset *ds = setting->setting;
- int val = -EINVAL;
-
- if (ds->get)
- val = ds->get(drive);
-
- return val;
-}
-
-/**
- * ide_write_setting - write an IDE setting
- * @drive: drive to write to
- * @setting: drive setting
- * @val: value
- *
- * Write a drive setting if it is possible. The caller
- * must hold the ide_setting_mtx when making this call.
- *
- * BUGS: the data and the error share the same return value,
- * so an error of -EINVAL and a legitimate return of the same
- * value cannot be told apart.
- *
- * FIXME: This should be changed to enqueue a special request
- * to the driver to change settings, and then wait on a sema for completion.
- * The current scheme of polling is kludgy, though safe enough.
- */
-
-static int ide_write_setting(ide_drive_t *drive,
- const struct ide_proc_devset *setting, int val)
-{
- const struct ide_devset *ds = setting->setting;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (!ds->set)
- return -EPERM;
- if ((ds->flags & DS_SYNC)
- && (val < setting->min || val > setting->max))
- return -EINVAL;
- return ide_devset_execute(drive, ds, val);
-}
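
The checks in ide_write_setting() run in a fixed order: caller capability, then whether the setting is writable at all, then (for DS_SYNC settings) the value range, and only then the actual update. A standalone sketch of that validation ladder, with a hypothetical apply() backend and an unconditional range check for simplicity:

    #include <errno.h>
    #include <stdbool.h>

    struct setting {
        bool writable;
        int min, max;
        int (*apply)(int val);           /* hypothetical backend */
    };

    /* Capability, settability, range, then apply -- same order as above. */
    static int write_setting(const struct setting *s, int val, bool is_admin)
    {
        if (!is_admin)
            return -EACCES;
        if (!s->writable)
            return -EPERM;
        if (val < s->min || val > s->max)
            return -EINVAL;
        return s->apply(val);
    }
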
-
-ide_devset_get(xfer_rate, current_speed);
-
-static int set_xfer_rate (ide_drive_t *drive, int arg)
-{
- struct ide_cmd cmd;
-
- if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
- return -EINVAL;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.command = ATA_CMD_SET_FEATURES;
- cmd.tf.feature = SETFEATURES_XFER;
- cmd.tf.nsect = (u8)arg;
- cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT;
- cmd.valid.in.tf = IDE_VALID_NSECT;
- cmd.tf_flags = IDE_TFLAG_SET_XFER;
-
- return ide_no_data_taskfile(drive, &cmd);
-}
-
-ide_devset_rw(current_speed, xfer_rate);
-ide_devset_rw_field(init_speed, init_speed);
-ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
-ide_devset_ro_field(number, dn);
-
-static const struct ide_proc_devset ide_generic_settings[] = {
- IDE_PROC_DEVSET(current_speed, 0, 70),
- IDE_PROC_DEVSET(init_speed, 0, 70),
- IDE_PROC_DEVSET(io_32bit, 0, 1 + (SUPPORT_VLB_SYNC << 1)),
- IDE_PROC_DEVSET(keepsettings, 0, 1),
- IDE_PROC_DEVSET(nice1, 0, 1),
- IDE_PROC_DEVSET(number, 0, 3),
- IDE_PROC_DEVSET(pio_mode, 0, 255),
- IDE_PROC_DEVSET(unmaskirq, 0, 1),
- IDE_PROC_DEVSET(using_dma, 0, 1),
- { NULL },
-};
-
-static void proc_ide_settings_warn(void)
-{
- printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
- "obsolete, and will be removed soon!\n");
-}
-
-static int ide_settings_proc_show(struct seq_file *m, void *v)
-{
- const struct ide_proc_devset *setting, *g, *d;
- const struct ide_devset *ds;
- ide_drive_t *drive = (ide_drive_t *) m->private;
- int rc, mul_factor, div_factor;
-
- proc_ide_settings_warn();
-
- mutex_lock(&ide_setting_mtx);
- g = ide_generic_settings;
- d = drive->settings;
- seq_printf(m, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
- seq_printf(m, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
- while (g->name || (d && d->name)) {
- /* read settings in the alphabetical order */
- if (g->name && d && d->name) {
- if (strcmp(d->name, g->name) < 0)
- setting = d++;
- else
- setting = g++;
- } else if (d && d->name) {
- setting = d++;
- } else
- setting = g++;
- mul_factor = setting->mulf ? setting->mulf(drive) : 1;
- div_factor = setting->divf ? setting->divf(drive) : 1;
- seq_printf(m, "%-24s", setting->name);
- rc = ide_read_setting(drive, setting);
- if (rc >= 0)
- seq_printf(m, "%-16d", rc * mul_factor / div_factor);
- else
- seq_printf(m, "%-16s", "write-only");
- seq_printf(m, "%-16d%-16d", (setting->min * mul_factor + div_factor - 1) / div_factor, setting->max * mul_factor / div_factor);
- ds = setting->setting;
- if (ds->get)
- seq_printf(m, "r");
- if (ds->set)
- seq_printf(m, "w");
- seq_printf(m, "\n");
- }
- mutex_unlock(&ide_setting_mtx);
- return 0;
-}
-
-static int ide_settings_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ide_settings_proc_show, PDE_DATA(inode));
-}
-
-#define MAX_LEN 30
-
-static ssize_t ide_settings_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- ide_drive_t *drive = PDE_DATA(file_inode(file));
- char name[MAX_LEN + 1];
- int for_real = 0, mul_factor, div_factor;
- unsigned long n;
-
- const struct ide_proc_devset *setting;
- char *buf, *s;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- proc_ide_settings_warn();
-
- if (count >= PAGE_SIZE)
- return -EINVAL;
-
- s = buf = (char *)__get_free_page(GFP_USER);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count)) {
- free_page((unsigned long)buf);
- return -EFAULT;
- }
-
- buf[count] = '\0';
-
- /*
- * Skip over leading whitespace
- */
- while (count && isspace(*s)) {
- --count;
- ++s;
- }
- /*
- * Do one full pass to verify all parameters,
- * then do another to actually write the new settings.
- */
- do {
- char *p = s;
- n = count;
- while (n > 0) {
- unsigned val;
- char *q = p;
-
- while (n > 0 && *p != ':') {
- --n;
- p++;
- }
- if (*p != ':')
- goto parse_error;
- if (p - q > MAX_LEN)
- goto parse_error;
- memcpy(name, q, p - q);
- name[p - q] = 0;
-
- if (n > 0) {
- --n;
- p++;
- } else
- goto parse_error;
-
- val = simple_strtoul(p, &q, 10);
- n -= q - p;
- p = q;
- if (n > 0 && !isspace(*p))
- goto parse_error;
- while (n > 0 && isspace(*p)) {
- --n;
- ++p;
- }
-
- mutex_lock(&ide_setting_mtx);
- /* generic settings first, then driver specific ones */
- setting = ide_find_setting(ide_generic_settings, name);
- if (!setting) {
- if (drive->settings)
- setting = ide_find_setting(drive->settings, name);
- if (!setting) {
- mutex_unlock(&ide_setting_mtx);
- goto parse_error;
- }
- }
- if (for_real) {
- mul_factor = setting->mulf ? setting->mulf(drive) : 1;
- div_factor = setting->divf ? setting->divf(drive) : 1;
- ide_write_setting(drive, setting, val * div_factor / mul_factor);
- }
- mutex_unlock(&ide_setting_mtx);
- }
- } while (!for_real++);
- free_page((unsigned long)buf);
- return count;
-parse_error:
- free_page((unsigned long)buf);
- printk("%s(): parse error\n", __func__);
- return -EINVAL;
-}
-
-static const struct proc_ops ide_settings_proc_ops = {
- .proc_open = ide_settings_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
- .proc_write = ide_settings_proc_write,
-};
-
-int ide_capacity_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%llu\n", (long long)0x7fffffff);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_capacity_proc_show);
-
-int ide_geometry_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
-
- seq_printf(m, "physical %d/%d/%d\n",
- drive->cyl, drive->head, drive->sect);
- seq_printf(m, "logical %d/%d/%d\n",
- drive->bios_cyl, drive->bios_head, drive->bios_sect);
- return 0;
-}
-EXPORT_SYMBOL(ide_geometry_proc_show);
-
-static int ide_dmodel_proc_show(struct seq_file *seq, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) seq->private;
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- seq_printf(seq, "%.40s\n", m[0] ? m : "(none)");
- return 0;
-}
-
-static int ide_driver_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *)m->private;
- struct device *dev = &drive->gendev;
- struct ide_driver *ide_drv;
-
- if (dev->driver) {
- ide_drv = to_ide_driver(dev->driver);
- seq_printf(m, "%s version %s\n",
- dev->driver->name, ide_drv->version);
- } else
- seq_printf(m, "ide-default version 0.9.newide\n");
- return 0;
-}
-
-static int ide_media_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
- const char *media;
-
- switch (drive->media) {
- case ide_disk: media = "disk\n"; break;
- case ide_cdrom: media = "cdrom\n"; break;
- case ide_tape: media = "tape\n"; break;
- case ide_floppy: media = "floppy\n"; break;
- case ide_optical: media = "optical\n"; break;
- default: media = "UNKNOWN\n"; break;
- }
- seq_puts(m, media);
- return 0;
-}
-
-static int ide_media_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ide_media_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations ide_media_proc_fops = {
- .owner = THIS_MODULE,
- .open = ide_media_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static ide_proc_entry_t generic_drive_entries[] = {
- { "driver", S_IFREG|S_IRUGO, ide_driver_proc_show },
- { "identify", S_IFREG|S_IRUSR, ide_identify_proc_show },
- { "media", S_IFREG|S_IRUGO, ide_media_proc_show },
- { "model", S_IFREG|S_IRUGO, ide_dmodel_proc_show },
- {}
-};
-
-static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
-{
- struct proc_dir_entry *ent;
-
- if (!dir || !p)
- return;
- while (p->name != NULL) {
- ent = proc_create_single_data(p->name, p->mode, dir, p->show, data);
- if (!ent) return;
- p++;
- }
-}
-
-static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
-{
- if (!dir || !p)
- return;
- while (p->name != NULL) {
- remove_proc_entry(p->name, dir);
- p++;
- }
-}
-
-void ide_proc_register_driver(ide_drive_t *drive, struct ide_driver *driver)
-{
- mutex_lock(&ide_setting_mtx);
- drive->settings = driver->proc_devsets(drive);
- mutex_unlock(&ide_setting_mtx);
-
- ide_add_proc_entries(drive->proc, driver->proc_entries(drive), drive);
-}
-
-EXPORT_SYMBOL(ide_proc_register_driver);
-
-/**
- * ide_proc_unregister_driver - remove driver specific data
- * @drive: drive
- * @driver: driver
- *
- * Clean up the driver specific /proc files and IDE settings
- * for a given drive.
- *
- * Takes ide_setting_mtx.
- */
-
-void ide_proc_unregister_driver(ide_drive_t *drive, struct ide_driver *driver)
-{
- ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
-
- mutex_lock(&ide_setting_mtx);
- /*
- * ide_setting_mtx protects both the settings list and the use
- * of settings (we cannot take a setting out that is being used).
- */
- drive->settings = NULL;
- mutex_unlock(&ide_setting_mtx);
-}
-EXPORT_SYMBOL(ide_proc_unregister_driver);
-
-void ide_proc_port_register_devices(ide_hwif_t *hwif)
-{
- struct proc_dir_entry *ent;
- struct proc_dir_entry *parent = hwif->proc;
- ide_drive_t *drive;
- char name[64];
- int i;
-
- ide_port_for_each_dev(i, drive, hwif) {
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- continue;
-
- drive->proc = proc_mkdir(drive->name, parent);
- if (drive->proc) {
- ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
- proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
- drive->proc, &ide_settings_proc_ops,
- drive);
- }
- sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
- ent = proc_symlink(drive->name, proc_ide_root, name);
- if (!ent) return;
- }
-}
-
-void ide_proc_unregister_device(ide_drive_t *drive)
-{
- if (drive->proc) {
- remove_proc_entry("settings", drive->proc);
- ide_remove_proc_entries(drive->proc, generic_drive_entries);
- remove_proc_entry(drive->name, proc_ide_root);
- remove_proc_entry(drive->name, drive->hwif->proc);
- drive->proc = NULL;
- }
-}
-
-static ide_proc_entry_t hwif_entries[] = {
- { "channel", S_IFREG|S_IRUGO, ide_channel_proc_show },
- { "mate", S_IFREG|S_IRUGO, ide_mate_proc_show },
- { "model", S_IFREG|S_IRUGO, ide_imodel_proc_show },
- {}
-};
-
-void ide_proc_register_port(ide_hwif_t *hwif)
-{
- if (!hwif->proc) {
- hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
-
- if (!hwif->proc)
- return;
-
- ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
- }
-}
-
-void ide_proc_unregister_port(ide_hwif_t *hwif)
-{
- if (hwif->proc) {
- ide_remove_proc_entries(hwif->proc, hwif_entries);
- remove_proc_entry(hwif->name, proc_ide_root);
- hwif->proc = NULL;
- }
-}
-
-static int proc_print_driver(struct device_driver *drv, void *data)
-{
- struct ide_driver *ide_drv = to_ide_driver(drv);
- struct seq_file *s = data;
-
- seq_printf(s, "%s version %s\n", drv->name, ide_drv->version);
-
- return 0;
-}
-
-static int ide_drivers_show(struct seq_file *s, void *p)
-{
- int err;
-
- err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
- if (err < 0)
- printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
- __func__, err);
- return 0;
-}
-
-DEFINE_PROC_SHOW_ATTRIBUTE(ide_drivers);
-
-void proc_ide_create(void)
-{
- proc_ide_root = proc_mkdir("ide", NULL);
-
- if (!proc_ide_root)
- return;
-
- proc_create("drivers", 0, proc_ide_root, &ide_drivers_proc_ops);
-}
-
-void proc_ide_destroy(void)
-{
- remove_proc_entry("drivers", proc_ide_root);
- remove_proc_entry("ide", NULL);
-}
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
deleted file mode 100644
index b0411a1827a3..000000000000
--- a/drivers/ide/ide-scan-pci.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * support for probing IDE PCI devices in the PCI bus order
- *
- * Copyright (c) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (c) 1995-1998 Mark Lord
- *
- * May be copied or modified under the terms of the GNU General Public License
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/ide.h>
-
-/*
- * Module interfaces
- */
-
-static int pre_init = 1; /* Before first ordered IDE scan */
-static LIST_HEAD(ide_pci_drivers);
-
-/*
- * __ide_pci_register_driver - attach IDE driver
- * @driver: pci driver
- * @module: owner module of the driver
- *
- * Registers a driver with the IDE layer. The IDE layer arranges that
- * boot time setup is done in the expected device order and then
- * hands the controllers off to the core PCI code to do the rest of
- * the work.
- *
- * Return values are the same as for pci_register_driver
- */
-
-int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
- const char *mod_name)
-{
- if (!pre_init)
- return __pci_register_driver(driver, module, mod_name);
- driver->driver.owner = module;
- list_add_tail(&driver->node, &ide_pci_drivers);
- return 0;
-}
-EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
-
-/**
- * ide_scan_pcidev - find an IDE driver for a device
- * @dev: PCI device to check
- *
- * Look for an IDE driver to handle the device we are considering.
- * This is only used during boot up to get the ordering correct. After
- * boot up the pci layer takes over the job.
- */
-
-static int __init ide_scan_pcidev(struct pci_dev *dev)
-{
- struct list_head *l;
- struct pci_driver *d;
- int ret;
-
- list_for_each(l, &ide_pci_drivers) {
- d = list_entry(l, struct pci_driver, node);
- if (d->id_table) {
- const struct pci_device_id *id =
- pci_match_id(d->id_table, dev);
-
- if (id != NULL) {
- pci_assign_irq(dev);
- ret = d->probe(dev, id);
- if (ret >= 0) {
- dev->driver = d;
- pci_dev_get(dev);
- return 1;
- }
- }
- }
- }
- return 0;
-}
-
-/**
- * ide_scan_pcibus - perform the initial IDE driver scan
- *
- * Perform the initial bus-ordered rather than driver-ordered scan of
- * the PCI drivers. After this, all IDE PCI handling follows standard
- * module ordering instead of the traditional order.
- */
-
-static int __init ide_scan_pcibus(void)
-{
- struct pci_dev *dev = NULL;
- struct pci_driver *d, *tmp;
-
- pre_init = 0;
- for_each_pci_dev(dev)
- ide_scan_pcidev(dev);
-
- /*
- * Hand the drivers over to the PCI layer now we
- * are post init.
- */
-
- list_for_each_entry_safe(d, tmp, &ide_pci_drivers, node) {
- list_del(&d->node);
- if (__pci_register_driver(d, d->driver.owner,
- d->driver.mod_name))
- printk(KERN_ERR "%s: failed to register %s driver\n",
- __func__, d->driver.mod_name);
- }
-
- return 0;
-}
-device_initcall(ide_scan_pcibus);
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
deleted file mode 100644
index c08a8a0916e2..000000000000
--- a/drivers/ide/ide-sysfs.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/ide.h>
-
-char *ide_media_string(ide_drive_t *drive)
-{
- switch (drive->media) {
- case ide_disk:
- return "disk";
- case ide_cdrom:
- return "cdrom";
- case ide_tape:
- return "tape";
- case ide_floppy:
- return "floppy";
- case ide_optical:
- return "optical";
- default:
- return "UNKNOWN";
- }
-}
-
-static ssize_t media_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", ide_media_string(drive));
-}
-static DEVICE_ATTR_RO(media);
-
-static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", drive->name);
-}
-static DEVICE_ATTR_RO(drivename);
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
-}
-static DEVICE_ATTR_RO(modalias);
-
-static ssize_t model_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
-}
-static DEVICE_ATTR_RO(model);
-
-static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
-}
-static DEVICE_ATTR_RO(firmware);
-
-static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- ide_drive_t *drive = to_ide_device(dev);
- return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
-}
-static DEVICE_ATTR(serial, 0400, serial_show, NULL);
-
-static DEVICE_ATTR(unload_heads, 0644, ide_park_show, ide_park_store);
-
-static struct attribute *ide_attrs[] = {
- &dev_attr_media.attr,
- &dev_attr_drivename.attr,
- &dev_attr_modalias.attr,
- &dev_attr_model.attr,
- &dev_attr_firmware.attr,
- &dev_attr_serial.attr,
- &dev_attr_unload_heads.attr,
- NULL,
-};
-
-static const struct attribute_group ide_attr_group = {
- .attrs = ide_attrs,
-};
-
-const struct attribute_group *ide_dev_groups[] = {
- &ide_attr_group,
- NULL,
-};
-
-static ssize_t store_delete_devices(struct device *portdev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- ide_hwif_t *hwif = dev_get_drvdata(portdev);
-
- if (strncmp(buf, "1", n))
- return -EINVAL;
-
- ide_port_unregister_devices(hwif);
-
- return n;
-};
-
-static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices);
-
-static ssize_t store_scan(struct device *portdev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- ide_hwif_t *hwif = dev_get_drvdata(portdev);
-
- if (strncmp(buf, "1", n))
- return -EINVAL;
-
- ide_port_unregister_devices(hwif);
- ide_port_scan(hwif);
-
- return n;
-};
-
-static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
-
-static struct device_attribute *ide_port_attrs[] = {
- &dev_attr_delete_devices,
- &dev_attr_scan,
- NULL
-};
-
-int ide_sysfs_register_port(ide_hwif_t *hwif)
-{
- int i, rc;
-
- for (i = 0; ide_port_attrs[i]; i++) {
- rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
- if (rc)
- break;
- }
-
- return rc;
-}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
deleted file mode 100644
index fa05e7e7d609..000000000000
--- a/drivers/ide/ide-tape.c
+++ /dev/null
@@ -1,2083 +0,0 @@
-/*
- * IDE ATAPI streaming tape driver.
- *
- * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
- * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
- *
- * This driver was constructed as a student project in the software laboratory
- * of the faculty of electrical engineering in the Technion - Israel's
- * Institute Of Technology, with the guide of Avner Lottem and Dr. Ilana David.
- *
- * It is hereby placed under the terms of the GNU general public license.
- * (See linux/COPYING).
- *
- * For a historical changelog see
- * Documentation/ide/ChangeLog.ide-tape.1995-2002
- */
-
-#define DRV_NAME "ide-tape"
-
-#define IDETAPE_VERSION "1.20"
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/completion.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <scsi/scsi.h>
-
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <asm/unaligned.h>
-#include <linux/mtio.h>
-
-/* define to see debug info */
-#undef IDETAPE_DEBUG_LOG
-
-#ifdef IDETAPE_DEBUG_LOG
-#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)
-#else
-#define ide_debug_log(lvl, fmt, args...) do {} while (0)
-#endif
-
-/**************************** Tunable parameters *****************************/
-/*
- * After each failed packet command we issue a request sense command and retry
- * the packet command IDETAPE_MAX_PC_RETRIES times.
- *
- * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
- */
-#define IDETAPE_MAX_PC_RETRIES 3
-
-/*
- * The following parameter is used to select the point in the internal tape fifo
- * at which we will start to refill the buffer. Decreasing the following
- * parameter will improve the system's latency and interactive response, while
- * using a high value might improve system throughput.
- */
-#define IDETAPE_FIFO_THRESHOLD 2
-
-/*
- * DSC polling parameters.
- *
- * Polling for DSC (a single bit in the status register) is a very important
- * function in ide-tape. There are two cases in which we poll for DSC:
- *
- * 1. Before a read/write packet command, to ensure that we can transfer data
- * from/to the tape's data buffers, without causing an actual media access.
- * In case the tape is not ready yet, we take out our request from the device
- * request queue, so that ide.c could service requests from the other device
- * on the same interface in the meantime.
- *
- * 2. After the successful initialization of a "media access packet command",
- * which is a command that can take a long time to complete (the interval can
- * range from several seconds to even an hour). Again, we postpone our request
- * in the middle to free the bus for the other device. The polling frequency
- * here should be lower than the read/write frequency since those media access
- * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
- * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
- * (5 min), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
- *
- * We also set a timeout for the timer, in case something goes wrong. The
- * timeout should be longer than the maximum execution time of a tape operation.
- */
-
-/* DSC timings. */
-#define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
-#define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
-#define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
-#define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
-#define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
-#define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
-#define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
-
-/*************************** End of tunable parameters ***********************/
-
-/* tape directions */
-enum {
- IDETAPE_DIR_NONE = (1 << 0),
- IDETAPE_DIR_READ = (1 << 1),
- IDETAPE_DIR_WRITE = (1 << 2),
-};
-
-/* Tape door status */
-#define DOOR_UNLOCKED 0
-#define DOOR_LOCKED 1
-#define DOOR_EXPLICITLY_LOCKED 2
-
-/* Some defines for the SPACE command */
-#define IDETAPE_SPACE_OVER_FILEMARK 1
-#define IDETAPE_SPACE_TO_EOD 3
-
-/* Some defines for the LOAD UNLOAD command */
-#define IDETAPE_LU_LOAD_MASK 1
-#define IDETAPE_LU_RETENSION_MASK 2
-#define IDETAPE_LU_EOT_MASK 4
-
-/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
-#define IDETAPE_BLOCK_DESCRIPTOR 0
-#define IDETAPE_CAPABILITIES_PAGE 0x2a
-
-/*
- * Most of our global data which we need to save even as we leave the driver due
- * to an interrupt or a timer event is stored in the struct defined below.
- */
-typedef struct ide_tape_obj {
- ide_drive_t *drive;
- struct ide_driver *driver;
- struct gendisk *disk;
- struct device dev;
-
- /* used by REQ_IDETAPE_{READ,WRITE} requests */
- struct ide_atapi_pc queued_pc;
-
- /*
- * DSC polling variables.
- *
- * While polling for DSC we use postponed_rq to postpone the current
- * request so that ide.c will be able to service pending requests on the
- * other device. Note that at most we will have only one DSC (usually
- * data transfer) request in the device request queue.
- */
- bool postponed_rq;
-
- /* The time in which we started polling for DSC */
- unsigned long dsc_polling_start;
- /* Timer used to poll for dsc */
- struct timer_list dsc_timer;
- /* Read/Write dsc polling frequency */
- unsigned long best_dsc_rw_freq;
- unsigned long dsc_poll_freq;
- unsigned long dsc_timeout;
-
- /* Read position information */
- u8 partition;
- /* Current block */
- unsigned int first_frame;
-
- /* Last error information */
- u8 sense_key, asc, ascq;
-
- /* Character device operation */
- unsigned int minor;
- /* device name */
- char name[4];
- /* Current character device data transfer direction */
- u8 chrdev_dir;
-
- /* tape block size, usually 512 or 1024 bytes */
- unsigned short blk_size;
- int user_bs_factor;
-
- /* Copy of the tape's Capabilities and Mechanical Page */
- u8 caps[20];
-
- /*
- * Active data transfer request parameters.
- *
- * At most, there is only one ide-tape originated data transfer request
- * in the device request queue. This allows ide.c to easily service
- * requests from the other device when we postpone our active request.
- */
-
- /* Data buffer size chosen based on the tape's recommendation */
- int buffer_size;
- /* Staging buffer of buffer_size bytes */
- void *buf;
- /* The read/write cursor */
- void *cur;
- /* The number of valid bytes in buf */
- size_t valid;
-
- /* Measures average tape speed */
- unsigned long avg_time;
- int avg_size;
- int avg_speed;
-
- /* the door is currently locked */
- int door_locked;
- /* the tape hardware is write protected */
- char drv_write_prot;
- /* the tape is write protected (hardware or opened as read-only) */
- char write_prot;
-} idetape_tape_t;
-
-static DEFINE_MUTEX(ide_tape_mutex);
-static DEFINE_MUTEX(idetape_ref_mutex);
-
-static DEFINE_MUTEX(idetape_chrdev_mutex);
-
-static struct class *idetape_sysfs_class;
-
-static void ide_tape_release(struct device *);
-
-static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
-
-static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev,
- unsigned int i)
-{
- struct ide_tape_obj *tape = NULL;
-
- mutex_lock(&idetape_ref_mutex);
-
- if (cdev)
- tape = idetape_devs[i];
- else
- tape = ide_drv_g(disk, ide_tape_obj);
-
- if (tape) {
- if (ide_device_get(tape->drive))
- tape = NULL;
- else
- get_device(&tape->dev);
- }
-
- mutex_unlock(&idetape_ref_mutex);
- return tape;
-}
-
-static void ide_tape_put(struct ide_tape_obj *tape)
-{
- ide_drive_t *drive = tape->drive;
-
- mutex_lock(&idetape_ref_mutex);
- put_device(&tape->dev);
- ide_device_put(drive);
- mutex_unlock(&idetape_ref_mutex);
-}
-
-/*
- * called on each failed packet command retry to analyze the request sense. We
- * currently do not utilize this information.
- */
-static void idetape_analyze_error(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = drive->failed_pc;
- struct request *rq = drive->hwif->rq;
- u8 *sense = bio_data(rq->bio);
-
- tape->sense_key = sense[2] & 0xF;
- tape->asc = sense[12];
- tape->ascq = sense[13];
-
- ide_debug_log(IDE_DBG_FUNC,
- "cmd: 0x%x, sense key = %x, asc = %x, ascq = %x",
- rq->cmd[0], tape->sense_key, tape->asc, tape->ascq);
-
- /* correct remaining bytes to transfer */
- if (pc->flags & PC_FLAG_DMA_ERROR)
- scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
-
- /*
- * If error was the result of a zero-length read or write command,
- * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
- * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
- */
- if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
- /* length == 0 */
- && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
- if (tape->sense_key == 5) {
- /* don't report an error, everything's ok */
- pc->error = 0;
- /* don't retry read/write */
- pc->flags |= PC_FLAG_ABORT;
- }
- }
- if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
- pc->error = IDE_DRV_ERROR_FILEMARK;
- pc->flags |= PC_FLAG_ABORT;
- }
- if (pc->c[0] == WRITE_6) {
- if ((sense[2] & 0x40) || (tape->sense_key == 0xd
- && tape->asc == 0x0 && tape->ascq == 0x2)) {
- pc->error = IDE_DRV_ERROR_EOD;
- pc->flags |= PC_FLAG_ABORT;
- }
- }
- if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
- if (tape->sense_key == 8) {
- pc->error = IDE_DRV_ERROR_EOD;
- pc->flags |= PC_FLAG_ABORT;
- }
- if (!(pc->flags & PC_FLAG_ABORT) &&
- (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
- pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
- }
-}
-
-static void ide_tape_handle_dsc(ide_drive_t *);
-
-static int ide_tape_callback(ide_drive_t *drive, int dsc)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = drive->pc;
- struct request *rq = drive->hwif->rq;
- int uptodate = pc->error ? 0 : 1;
- int err = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc: %d, err: %d", rq->cmd[0],
- dsc, err);
-
- if (dsc)
- ide_tape_handle_dsc(drive);
-
- if (drive->failed_pc == pc)
- drive->failed_pc = NULL;
-
- if (pc->c[0] == REQUEST_SENSE) {
- if (uptodate)
- idetape_analyze_error(drive);
- else
- printk(KERN_ERR "ide-tape: Error in REQUEST SENSE "
- "itself - Aborting request!\n");
- } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
- unsigned int blocks =
- (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
-
- tape->avg_size += blocks * tape->blk_size;
-
- if (time_after_eq(jiffies, tape->avg_time + HZ)) {
- tape->avg_speed = tape->avg_size * HZ /
- (jiffies - tape->avg_time) / 1024;
- tape->avg_size = 0;
- tape->avg_time = jiffies;
- }
-
- tape->first_frame += blocks;
-
- if (pc->error) {
- uptodate = 0;
- err = pc->error;
- }
- }
- scsi_req(rq)->result = err;
-
- return uptodate;
-}
-
-/*
- * Postpone the current request so that ide.c will be able to service requests
- * from another device on the same port while we are polling for DSC.
- */
-static void ide_tape_stall_queue(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc_poll_freq: %lu",
- drive->hwif->rq->cmd[0], tape->dsc_poll_freq);
-
- tape->postponed_rq = true;
-
- ide_stall_queue(drive, tape->dsc_poll_freq);
-}
-
-static void ide_tape_handle_dsc(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- /* Media access command */
- tape->dsc_polling_start = jiffies;
- tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
- tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
- /* Allow ide.c to handle other requests */
- ide_tape_stall_queue(drive);
-}
-
-/*
- * Packet Command Interface
- *
- * The current Packet Command is available in drive->pc, and will not change
- * until we finish handling it. Each packet command is associated with a
- * callback function that will be called when the command is finished.
- *
- * The handling will be done in three stages:
- *
- * 1. ide_tape_issue_pc will send the packet command to the drive, and will set
- * the interrupt handler to ide_pc_intr.
- *
- * 2. On each interrupt, ide_pc_intr will be called. This step will be
- * repeated until the device signals us that no more interrupts will be issued.
- *
- * 3. ATAPI Tape media access commands have immediate status with a delayed
- * process. In case of a successful initiation of a media access packet command,
- * the DSC bit will be set when the actual execution of the command is finished.
- * Since the tape drive will not issue an interrupt, we have to poll for this
- * event. In this case, we define the request as "low priority request" by
- * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
- * exit the driver.
- *
- * ide.c will then give higher priority to requests which originate from the
- * other device, until we change rq_status to RQ_ACTIVE.
- *
- * 4. When the packet command is finished, it will be checked for errors.
- *
- * 5. In case an error was found, we queue a request sense packet command in
- * front of the request queue and retry the operation up to
- * IDETAPE_MAX_PC_RETRIES times.
- *
- * 6. In case no error was found, or we decided to give up and not to retry
- * again, the callback function will be called and then we will handle the next
- * request.
- */
-
-static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
- struct ide_cmd *cmd,
- struct ide_atapi_pc *pc)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct request *rq = drive->hwif->rq;
-
- if (drive->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
- drive->failed_pc = pc;
-
- /* Set the current packet command */
- drive->pc = pc;
-
- if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
- (pc->flags & PC_FLAG_ABORT)) {
-
- /*
- * We will "abort" retrying a packet command in case legitimate
- * error code was received (crossing a filemark, or end of the
- * media, for example).
- */
- if (!(pc->flags & PC_FLAG_ABORT)) {
- if (!(pc->c[0] == TEST_UNIT_READY &&
- tape->sense_key == 2 && tape->asc == 4 &&
- (tape->ascq == 1 || tape->ascq == 8))) {
- printk(KERN_ERR "ide-tape: %s: I/O error, "
- "pc = %2x, key = %2x, "
- "asc = %2x, ascq = %2x\n",
- tape->name, pc->c[0],
- tape->sense_key, tape->asc,
- tape->ascq);
- }
- /* Giving up */
- pc->error = IDE_DRV_ERROR_GENERAL;
- }
-
- drive->failed_pc = NULL;
- drive->pc_callback(drive, 0);
- ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
- return ide_stopped;
- }
- ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries,
- pc->c[0]);
-
- pc->retries++;
-
- return ide_issue_pc(drive, cmd);
-}
-
-/* A mode sense command is used to "sense" tape parameters. */
-static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
-{
- ide_init_pc(pc);
- pc->c[0] = MODE_SENSE;
- if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
- /* DBD = 1 - Don't return block descriptors */
- pc->c[1] = 8;
- pc->c[2] = page_code;
- /*
- * Changed pc->c[3] to 0 (255 will at best return unused info).
- *
- * For SCSI this byte is defined as subpage instead of high byte
- * of length and some IDE drives seem to interpret it this way
- * and return an error when 255 is used.
- */
- pc->c[3] = 0;
- /* We will just discard data in that case */
- pc->c[4] = 255;
- if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
- pc->req_xfer = 12;
- else if (page_code == IDETAPE_CAPABILITIES_PAGE)
- pc->req_xfer = 24;
- else
- pc->req_xfer = 50;
-}
-
-static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = drive->pc;
- u8 stat;
-
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_DSC) {
- if (stat & ATA_ERR) {
- /* Error detected */
- if (pc->c[0] != TEST_UNIT_READY)
- printk(KERN_ERR "ide-tape: %s: I/O error, ",
- tape->name);
- /* Retry operation */
- ide_retry_pc(drive);
- return ide_stopped;
- }
- pc->error = 0;
- } else {
- pc->error = IDE_DRV_ERROR_GENERAL;
- drive->failed_pc = NULL;
- }
- drive->pc_callback(drive, 0);
- return ide_stopped;
-}
-
-static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
- struct ide_atapi_pc *pc, struct request *rq,
- u8 opcode)
-{
- unsigned int length = blk_rq_sectors(rq) / (tape->blk_size >> 9);
-
- ide_init_pc(pc);
- put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
- pc->c[1] = 1;
-
- if (blk_rq_bytes(rq) == tape->buffer_size)
- pc->flags |= PC_FLAG_DMA_OK;
-
- if (opcode == READ_6)
- pc->c[0] = READ_6;
- else if (opcode == WRITE_6) {
- pc->c[0] = WRITE_6;
- pc->flags |= PC_FLAG_WRITING;
- }
-
- memcpy(scsi_req(rq)->cmd, pc->c, 12);
-}
-
-static ide_startstop_t idetape_do_request(ide_drive_t *drive,
- struct request *rq, sector_t block)
-{
- ide_hwif_t *hwif = drive->hwif;
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc *pc = NULL;
- struct ide_cmd cmd;
- struct scsi_request *req = scsi_req(rq);
- u8 stat;
-
- ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
- req->cmd[0], (unsigned long long)blk_rq_pos(rq),
- blk_rq_sectors(rq));
-
- BUG_ON(!blk_rq_is_private(rq));
- BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
- ide_req(rq)->type != ATA_PRIV_SENSE);
-
- /* Retry a failed packet command */
- if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
- pc = drive->failed_pc;
- goto out;
- }
-
- /*
- * If the tape is still busy, postpone our request and service
- * the other device meanwhile.
- */
- stat = hwif->tp_ops->read_status(hwif);
-
- if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
- (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
- drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
-
- if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
- drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
- drive->dev_flags &= ~IDE_DFLAG_POST_RESET;
- }
-
- if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) &&
- !(stat & ATA_DSC)) {
- if (!tape->postponed_rq) {
- tape->dsc_polling_start = jiffies;
- tape->dsc_poll_freq = tape->best_dsc_rw_freq;
- tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
- } else if (time_after(jiffies, tape->dsc_timeout)) {
- printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
- tape->name);
- if (req->cmd[13] & REQ_IDETAPE_PC2) {
- idetape_media_access_finished(drive);
- return ide_stopped;
- } else {
- return ide_do_reset(drive);
- }
- } else if (time_after(jiffies,
- tape->dsc_polling_start +
- IDETAPE_DSC_MA_THRESHOLD))
- tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
- ide_tape_stall_queue(drive);
- return ide_stopped;
- } else {
- drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;
- tape->postponed_rq = false;
- }
-
- if (req->cmd[13] & REQ_IDETAPE_READ) {
- pc = &tape->queued_pc;
- ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
- goto out;
- }
- if (req->cmd[13] & REQ_IDETAPE_WRITE) {
- pc = &tape->queued_pc;
- ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
- goto out;
- }
- if (req->cmd[13] & REQ_IDETAPE_PC1) {
- pc = (struct ide_atapi_pc *)ide_req(rq)->special;
- req->cmd[13] &= ~(REQ_IDETAPE_PC1);
- req->cmd[13] |= REQ_IDETAPE_PC2;
- goto out;
- }
- if (req->cmd[13] & REQ_IDETAPE_PC2) {
- idetape_media_access_finished(drive);
- return ide_stopped;
- }
- BUG();
-
-out:
- /* prepare sense request for this command */
- ide_prep_sense(drive, rq);
-
- memset(&cmd, 0, sizeof(cmd));
-
- if (rq_data_dir(rq))
- cmd.tf_flags |= IDE_TFLAG_WRITE;
-
- cmd.rq = rq;
-
- ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
- ide_map_sg(drive, &cmd);
-
- return ide_tape_issue_pc(drive, &cmd, pc);
-}
-
-/*
- * Write a filemark if write_filemark=1. Flush the device buffers without
- * writing a filemark otherwise.
- */
-static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc, int write_filemark)
-{
- ide_init_pc(pc);
- pc->c[0] = WRITE_FILEMARKS;
- pc->c[4] = write_filemark;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- int load_attempted = 0;
-
- /* Wait for the tape to become ready */
- set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
- timeout += jiffies;
- while (time_before(jiffies, timeout)) {
- if (ide_do_test_unit_ready(drive, disk) == 0)
- return 0;
- if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
- || (tape->asc == 0x3A)) {
- /* no media */
- if (load_attempted)
- return -ENOMEDIUM;
- ide_do_start_stop(drive, disk, IDETAPE_LU_LOAD_MASK);
- load_attempted = 1;
- /* not about to be ready */
- } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
- (tape->ascq == 1 || tape->ascq == 8)))
- return -EIO;
- msleep(100);
- }
- return -EIO;
-}
-
-static int idetape_flush_tape_buffers(ide_drive_t *drive)
-{
- struct ide_tape_obj *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- int rc;
-
- idetape_create_write_filemark_cmd(drive, &pc, 0);
- rc = ide_queue_pc_tail(drive, tape->disk, &pc, NULL, 0);
- if (rc)
- return rc;
- idetape_wait_ready(drive, 60 * 5 * HZ);
- return 0;
-}
-
-static int ide_tape_read_position(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 buf[20];
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- /* prep cmd */
- ide_init_pc(&pc);
- pc.c[0] = READ_POSITION;
- pc.req_xfer = 20;
-
- if (ide_queue_pc_tail(drive, tape->disk, &pc, buf, pc.req_xfer))
- return -1;
-
- if (!pc.error) {
- ide_debug_log(IDE_DBG_FUNC, "BOP - %s",
- (buf[0] & 0x80) ? "Yes" : "No");
- ide_debug_log(IDE_DBG_FUNC, "EOP - %s",
- (buf[0] & 0x40) ? "Yes" : "No");
-
- if (buf[0] & 0x4) {
- printk(KERN_INFO "ide-tape: Block location is unknown"
- "to the tape\n");
- clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
- &drive->atapi_flags);
- return -1;
- } else {
- ide_debug_log(IDE_DBG_FUNC, "Block Location: %u",
- be32_to_cpup((__be32 *)&buf[4]));
-
- tape->partition = buf[1];
- tape->first_frame = be32_to_cpup((__be32 *)&buf[4]);
- set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
- &drive->atapi_flags);
- }
- }
-
- return tape->first_frame;
-}
-
-static void idetape_create_locate_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc,
- unsigned int block, u8 partition, int skip)
-{
- ide_init_pc(pc);
- pc->c[0] = POSITION_TO_ELEMENT;
- pc->c[1] = 2;
- put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
- pc->c[8] = partition;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (tape->chrdev_dir != IDETAPE_DIR_READ)
- return;
-
- clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags);
- tape->valid = 0;
- if (tape->buf != NULL) {
- kfree(tape->buf);
- tape->buf = NULL;
- }
-
- tape->chrdev_dir = IDETAPE_DIR_NONE;
-}
-
-/*
- * Position the tape to the requested block using the LOCATE packet command.
- * A READ POSITION command is then issued to check where we are positioned. Like
- * all higher level operations, we queue the commands at the tail of the request
- * queue and wait for their completion.
- */
-static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
- u8 partition, int skip)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- int ret;
- struct ide_atapi_pc pc;
-
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- __ide_tape_discard_merge_buffer(drive);
- idetape_wait_ready(drive, 60 * 5 * HZ);
- idetape_create_locate_cmd(drive, &pc, block, partition, skip);
- ret = ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- if (ret)
- return ret;
-
- ret = ide_tape_read_position(drive);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
- int restore_position)
-{
- idetape_tape_t *tape = drive->driver_data;
- int seek, position;
-
- __ide_tape_discard_merge_buffer(drive);
- if (restore_position) {
- position = ide_tape_read_position(drive);
- seek = position > 0 ? position : 0;
- if (idetape_position_tape(drive, seek, 0, 0)) {
- printk(KERN_INFO "ide-tape: %s: position_tape failed in"
- " %s\n", tape->name, __func__);
- return;
- }
- }
-}
-
-/*
- * Generate a read/write request for the block device interface and wait for it
- * to be serviced.
- */
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct request *rq;
- int ret;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, size: %d", cmd, size);
-
- BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
- BUG_ON(size < 0 || size % tape->blk_size);
-
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_MISC;
- scsi_req(rq)->cmd[13] = cmd;
- rq->rq_disk = tape->disk;
- rq->__sector = tape->first_frame;
-
- if (size) {
- ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
- GFP_NOIO);
- if (ret)
- goto out_put;
- }
-
- blk_execute_rq(tape->disk, rq, 0);
-
- /* calculate the number of transferred bytes and update buffer state */
- size -= scsi_req(rq)->resid_len;
- tape->cur = tape->buf;
- if (cmd == REQ_IDETAPE_READ)
- tape->valid = size;
- else
- tape->valid = 0;
-
- ret = size;
- if (scsi_req(rq)->result == IDE_DRV_ERROR_GENERAL)
- ret = -EIO;
-out_put:
- blk_put_request(rq);
- return ret;
-}
-
-static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = INQUIRY;
- pc->c[4] = 254;
- pc->req_xfer = 254;
-}
-
-static void idetape_create_rewind_cmd(ide_drive_t *drive,
- struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = REZERO_UNIT;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
-{
- ide_init_pc(pc);
- pc->c[0] = ERASE;
- pc->c[1] = 1;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
-{
- ide_init_pc(pc);
- pc->c[0] = SPACE;
- put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
- pc->c[1] = cmd;
- pc->flags |= PC_FLAG_WAIT_FOR_DSC;
-}
-
-static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
- printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
- " but we are not writing.\n");
- return;
- }
- if (tape->buf) {
- size_t aligned = roundup(tape->valid, tape->blk_size);
-
- memset(tape->cur, 0, aligned - tape->valid);
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
- kfree(tape->buf);
- tape->buf = NULL;
- }
- tape->chrdev_dir = IDETAPE_DIR_NONE;
-}
-
-static int idetape_init_rw(ide_drive_t *drive, int dir)
-{
- idetape_tape_t *tape = drive->driver_data;
- int rc;
-
- BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
-
- if (tape->chrdev_dir == dir)
- return 0;
-
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- ide_tape_discard_merge_buffer(drive, 1);
- else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- ide_tape_flush_merge_buffer(drive);
- idetape_flush_tape_buffers(drive);
- }
-
- if (tape->buf || tape->valid) {
- printk(KERN_ERR "ide-tape: valid should be 0 now\n");
- tape->valid = 0;
- }
-
- tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
- if (!tape->buf)
- return -ENOMEM;
- tape->chrdev_dir = dir;
- tape->cur = tape->buf;
-
- /*
- * Issue a 0 rw command to ensure that DSC handshake is
- * switched from completion mode to buffer available mode. No
- * point in issuing this if DSC overlap isn't supported, some
- * drives (Seagate STT3401A) will return an error.
- */
- if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
- int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
- : REQ_IDETAPE_WRITE;
-
- rc = idetape_queue_rw_tail(drive, cmd, 0);
- if (rc < 0) {
- kfree(tape->buf);
- tape->buf = NULL;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
- return rc;
- }
- }
-
- return 0;
-}
-
-static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- memset(tape->buf, 0, tape->buffer_size);
-
- while (bcount) {
- unsigned int count = min(tape->buffer_size, bcount);
-
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
- bcount -= count;
- }
-}
-
-/*
- * Rewinds the tape to the Beginning Of the current Partition (BOP). We
- * currently support only one partition.
- */
-static int idetape_rewind_tape(ide_drive_t *drive)
-{
- struct ide_tape_obj *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- struct ide_atapi_pc pc;
- int ret;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- idetape_create_rewind_cmd(drive, &pc);
- ret = ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- if (ret)
- return ret;
-
- ret = ide_tape_read_position(drive);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-/* mtio.h compatible commands should be issued to the chrdev interface. */
-static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
- unsigned long arg)
-{
- idetape_tape_t *tape = drive->driver_data;
- void __user *argp = (void __user *)arg;
-
- struct idetape_config {
- int dsc_rw_frequency;
- int dsc_media_access_frequency;
- int nr_stages;
- } config;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%04x", cmd);
-
- switch (cmd) {
- case 0x0340:
- if (copy_from_user(&config, argp, sizeof(config)))
- return -EFAULT;
- tape->best_dsc_rw_freq = config.dsc_rw_frequency;
- break;
- case 0x0350:
- memset(&config, 0, sizeof(config));
- config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
- config.nr_stages = 1;
- if (copy_to_user(argp, &config, sizeof(config)))
- return -EFAULT;
- break;
- default:
- return -EIO;
- }
- return 0;
-}
-
-static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
- int mt_count)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- struct ide_atapi_pc pc;
- int retval, count = 0;
- int sprev = !!(tape->caps[4] & 0x20);
-
-
- ide_debug_log(IDE_DBG_FUNC, "mt_op: %d, mt_count: %d", mt_op, mt_count);
-
- if (mt_count == 0)
- return 0;
- if (MTBSF == mt_op || MTBSFM == mt_op) {
- if (!sprev)
- return -EIO;
- mt_count = -mt_count;
- }
-
- if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- tape->valid = 0;
- if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK),
- &drive->atapi_flags))
- ++count;
- ide_tape_discard_merge_buffer(drive, 0);
- }
-
- switch (mt_op) {
- case MTFSF:
- case MTBSF:
- idetape_create_space_cmd(&pc, mt_count - count,
- IDETAPE_SPACE_OVER_FILEMARK);
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- case MTFSFM:
- case MTBSFM:
- if (!sprev)
- return -EIO;
- retval = idetape_space_over_filemarks(drive, MTFSF,
- mt_count - count);
- if (retval)
- return retval;
- count = (MTBSFM == mt_op ? 1 : -1);
- return idetape_space_over_filemarks(drive, MTFSF, count);
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
- mt_op);
- return -EIO;
- }
-}
-
-/*
- * Our character device read / write functions.
- *
- * The tape is optimized to maximize throughput when it is transferring an
- * integral number of the "continuous transfer limit", which is a parameter of
- * the specific tape (26kB on my particular tape, 32kB for Onstream).
- *
- * As of version 1.3 of the driver, the character device provides an abstract
- * continuous view of the media - any mix of block sizes (even 1 byte) on the
- * same backup/restore procedure is supported. The driver will internally
- * convert the requests to the recommended transfer unit, so that a mismatch
- * between the user's block size and the recommended size will only result in
- * (slightly) increased driver overhead, but will no longer hurt performance.
- * This is not applicable to Onstream.
- */
-static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ide_tape_obj *tape = file->private_data;
- ide_drive_t *drive = tape->drive;
- size_t done = 0;
- ssize_t ret = 0;
- int rc;
-
- ide_debug_log(IDE_DBG_FUNC, "count %zd", count);
-
- if (tape->chrdev_dir != IDETAPE_DIR_READ) {
- if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags))
- if (count > tape->blk_size &&
- (count % tape->blk_size) == 0)
- tape->user_bs_factor = count / tape->blk_size;
- }
-
- rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
- if (rc < 0)
- return rc;
-
- while (done < count) {
- size_t todo;
-
- /* refill if staging buffer is empty */
- if (!tape->valid) {
- /* If we are at a filemark, nothing more to read */
- if (test_bit(ilog2(IDE_AFLAG_FILEMARK),
- &drive->atapi_flags))
- break;
- /* read */
- if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
- tape->buffer_size) <= 0)
- break;
- }
-
- /* copy out */
- todo = min_t(size_t, count - done, tape->valid);
- if (copy_to_user(buf + done, tape->cur, todo))
- ret = -EFAULT;
-
- tape->cur += todo;
- tape->valid -= todo;
- done += todo;
- }
-
- if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) {
- idetape_space_over_filemarks(drive, MTFSF, 1);
- return 0;
- }
-
- return ret ? ret : done;
-}
-
-static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ide_tape_obj *tape = file->private_data;
- ide_drive_t *drive = tape->drive;
- size_t done = 0;
- ssize_t ret = 0;
- int rc;
-
- /* The drive is write protected. */
- if (tape->write_prot)
- return -EACCES;
-
- ide_debug_log(IDE_DBG_FUNC, "count %zd", count);
-
- /* Initialize write operation */
- rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
- if (rc < 0)
- return rc;
-
- while (done < count) {
- size_t todo;
-
- /* flush if staging buffer is full */
- if (tape->valid == tape->buffer_size &&
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
- tape->buffer_size) <= 0)
- return rc;
-
- /* copy in */
- todo = min_t(size_t, count - done,
- tape->buffer_size - tape->valid);
- if (copy_from_user(tape->cur, buf + done, todo))
- ret = -EFAULT;
-
- tape->cur += todo;
- tape->valid += todo;
- done += todo;
- }
-
- return ret ? ret : done;
-}
-
-static int idetape_write_filemark(ide_drive_t *drive)
-{
- struct ide_tape_obj *tape = drive->driver_data;
- struct ide_atapi_pc pc;
-
- /* Write a filemark */
- idetape_create_write_filemark_cmd(drive, &pc, 1);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, NULL, 0)) {
- printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
- return -EIO;
- }
- return 0;
-}
-
-/*
- * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
- * requested.
- *
- * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
- * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
- * usually not supported.
- *
- * The following commands are currently not supported:
- *
- * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
- * MT_ST_WRITE_THRESHOLD.
- */
-static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct gendisk *disk = tape->disk;
- struct ide_atapi_pc pc;
- int i, retval;
-
- ide_debug_log(IDE_DBG_FUNC, "MTIOCTOP ioctl: mt_op: %d, mt_count: %d",
- mt_op, mt_count);
-
- switch (mt_op) {
- case MTFSF:
- case MTFSFM:
- case MTBSF:
- case MTBSFM:
- if (!mt_count)
- return 0;
- return idetape_space_over_filemarks(drive, mt_op, mt_count);
- default:
- break;
- }
-
- switch (mt_op) {
- case MTWEOF:
- if (tape->write_prot)
- return -EACCES;
- ide_tape_discard_merge_buffer(drive, 1);
- for (i = 0; i < mt_count; i++) {
- retval = idetape_write_filemark(drive);
- if (retval)
- return retval;
- }
- return 0;
- case MTREW:
- ide_tape_discard_merge_buffer(drive, 0);
- if (idetape_rewind_tape(drive))
- return -EIO;
- return 0;
- case MTLOAD:
- ide_tape_discard_merge_buffer(drive, 0);
- return ide_do_start_stop(drive, disk, IDETAPE_LU_LOAD_MASK);
- case MTUNLOAD:
- case MTOFFL:
- /*
- * If door is locked, attempt to unlock before
- * attempting to eject.
- */
- if (tape->door_locked) {
- if (!ide_set_media_lock(drive, disk, 0))
- tape->door_locked = DOOR_UNLOCKED;
- }
- ide_tape_discard_merge_buffer(drive, 0);
- retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK);
- if (!retval)
- clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
- &drive->atapi_flags);
- return retval;
- case MTNOP:
- ide_tape_discard_merge_buffer(drive, 0);
- return idetape_flush_tape_buffers(drive);
- case MTRETEN:
- ide_tape_discard_merge_buffer(drive, 0);
- return ide_do_start_stop(drive, disk,
- IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
- case MTEOM:
- idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- case MTERASE:
- (void)idetape_rewind_tape(drive);
- idetape_create_erase_cmd(&pc);
- return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
- case MTSETBLK:
- if (mt_count) {
- if (mt_count < tape->blk_size ||
- mt_count % tape->blk_size)
- return -EIO;
- tape->user_bs_factor = mt_count / tape->blk_size;
- clear_bit(ilog2(IDE_AFLAG_DETECT_BS),
- &drive->atapi_flags);
- } else
- set_bit(ilog2(IDE_AFLAG_DETECT_BS),
- &drive->atapi_flags);
- return 0;
- case MTSEEK:
- ide_tape_discard_merge_buffer(drive, 0);
- return idetape_position_tape(drive,
- mt_count * tape->user_bs_factor, tape->partition, 0);
- case MTSETPART:
- ide_tape_discard_merge_buffer(drive, 0);
- return idetape_position_tape(drive, 0, mt_count, 0);
- case MTFSR:
- case MTBSR:
- case MTLOCK:
- retval = ide_set_media_lock(drive, disk, 1);
- if (retval)
- return retval;
- tape->door_locked = DOOR_EXPLICITLY_LOCKED;
- return 0;
- case MTUNLOCK:
- retval = ide_set_media_lock(drive, disk, 0);
- if (retval)
- return retval;
- tape->door_locked = DOOR_UNLOCKED;
- return 0;
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
- mt_op);
- return -EIO;
- }
-}
-
-/*
- * Our character device ioctls. General mtio.h magnetic io commands are
- * supported here, and not in the corresponding block interface. Our own
- * ide-tape ioctls are supported on both interfaces.
- */
-static long do_idetape_chrdev_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_tape_obj *tape = file->private_data;
- ide_drive_t *drive = tape->drive;
- struct mtop mtop;
- struct mtget mtget;
- struct mtpos mtpos;
- int block_offset = 0, position = tape->first_frame;
- void __user *argp = (void __user *)arg;
-
- ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x", cmd);
-
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- ide_tape_flush_merge_buffer(drive);
- idetape_flush_tape_buffers(drive);
- }
- if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = tape->valid /
- (tape->blk_size * tape->user_bs_factor);
- position = ide_tape_read_position(drive);
- if (position < 0)
- return -EIO;
- }
- switch (cmd) {
- case MTIOCTOP:
- if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
- return -EFAULT;
- return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
- case MTIOCGET:
- memset(&mtget, 0, sizeof(struct mtget));
- mtget.mt_type = MT_ISSCSI2;
- mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
- mtget.mt_dsreg =
- ((tape->blk_size * tape->user_bs_factor)
- << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
-
- if (tape->drv_write_prot)
- mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
-
- return put_user_mtget(argp, &mtget);
- case MTIOCPOS:
- mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
- return put_user_mtpos(argp, &mtpos);
- default:
- if (tape->chrdev_dir == IDETAPE_DIR_READ)
- ide_tape_discard_merge_buffer(drive, 1);
- return idetape_blkdev_ioctl(drive, cmd, arg);
- }
-}
-
-static long idetape_chrdev_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- long ret;
- mutex_lock(&ide_tape_mutex);
- ret = do_idetape_chrdev_ioctl(file, cmd, arg);
- mutex_unlock(&ide_tape_mutex);
- return ret;
-}
-
-static long idetape_chrdev_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (cmd == MTIOCPOS32)
- cmd = MTIOCPOS;
- else if (cmd == MTIOCGET32)
- cmd = MTIOCGET;
-
- mutex_lock(&ide_tape_mutex);
- ret = do_idetape_chrdev_ioctl(file, cmd, arg);
- mutex_unlock(&ide_tape_mutex);
- return ret;
-}
-
-/*
- * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
- * block size with the reported value.
- */
-static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 buf[12];
-
- idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, buf, pc.req_xfer)) {
- printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
- if (tape->blk_size == 0) {
- printk(KERN_WARNING "ide-tape: Cannot deal with zero "
- "block size, assuming 32k\n");
- tape->blk_size = 32768;
- }
- return;
- }
- tape->blk_size = (buf[4 + 5] << 16) +
- (buf[4 + 6] << 8) +
- buf[4 + 7];
- tape->drv_write_prot = (buf[2] & 0x80) >> 7;
-
- ide_debug_log(IDE_DBG_FUNC, "blk_size: %d, write_prot: %d",
- tape->blk_size, tape->drv_write_prot);
-}
-
-static int idetape_chrdev_open(struct inode *inode, struct file *filp)
-{
- unsigned int minor = iminor(inode), i = minor & ~0xc0;
- ide_drive_t *drive;
- idetape_tape_t *tape;
- int retval;
-
- if (i >= MAX_HWIFS * MAX_DRIVES)
- return -ENXIO;
-
- mutex_lock(&idetape_chrdev_mutex);
-
- tape = ide_tape_get(NULL, true, i);
- if (!tape) {
- mutex_unlock(&idetape_chrdev_mutex);
- return -ENXIO;
- }
-
- drive = tape->drive;
- filp->private_data = tape;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- /*
- * We really want to do nonseekable_open(inode, filp); here, but some
- * versions of tar incorrectly call lseek on tapes and bail out if that
- * fails. So we disallow pread() and pwrite(), but permit lseeks.
- */
- filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
-
-
- if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) {
- retval = -EBUSY;
- goto out_put_tape;
- }
-
- retval = idetape_wait_ready(drive, 60 * HZ);
- if (retval) {
- clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
- printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
- goto out_put_tape;
- }
-
- ide_tape_read_position(drive);
- if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags))
- (void)idetape_rewind_tape(drive);
-
- /* Read block size and write protect status from drive. */
- ide_tape_get_bsize_from_bdesc(drive);
-
- /* Set write protect flag if device is opened as read-only. */
- if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
- tape->write_prot = 1;
- else
- tape->write_prot = tape->drv_write_prot;
-
- /* Make sure drive isn't write protected if user wants to write. */
- if (tape->write_prot) {
- if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
- (filp->f_flags & O_ACCMODE) == O_RDWR) {
- clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
- retval = -EROFS;
- goto out_put_tape;
- }
- }
-
- /* Lock the tape drive door so user can't eject. */
- if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
- if (!ide_set_media_lock(drive, tape->disk, 1)) {
- if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
- tape->door_locked = DOOR_LOCKED;
- }
- }
- mutex_unlock(&idetape_chrdev_mutex);
-
- return 0;
-
-out_put_tape:
- ide_tape_put(tape);
-
- mutex_unlock(&idetape_chrdev_mutex);
-
- return retval;
-}
-
-static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- ide_tape_flush_merge_buffer(drive);
- tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
- if (tape->buf != NULL) {
- idetape_pad_zeros(drive, tape->blk_size *
- (tape->user_bs_factor - 1));
- kfree(tape->buf);
- tape->buf = NULL;
- }
- idetape_write_filemark(drive);
- idetape_flush_tape_buffers(drive);
- idetape_flush_tape_buffers(drive);
-}
-
-static int idetape_chrdev_release(struct inode *inode, struct file *filp)
-{
- struct ide_tape_obj *tape = filp->private_data;
- ide_drive_t *drive = tape->drive;
- unsigned int minor = iminor(inode);
-
- mutex_lock(&idetape_chrdev_mutex);
-
- tape = drive->driver_data;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
- idetape_write_release(drive, minor);
- if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- if (minor < 128)
- ide_tape_discard_merge_buffer(drive, 1);
- }
-
- if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
- &drive->atapi_flags))
- (void) idetape_rewind_tape(drive);
-
- if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
- if (tape->door_locked == DOOR_LOCKED) {
- if (!ide_set_media_lock(drive, tape->disk, 0))
- tape->door_locked = DOOR_UNLOCKED;
- }
- }
- clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
- ide_tape_put(tape);
-
- mutex_unlock(&idetape_chrdev_mutex);
-
- return 0;
-}
-
-static void idetape_get_inquiry_results(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 pc_buf[256];
- char fw_rev[4], vendor_id[8], product_id[16];
-
- idetape_create_inquiry_cmd(&pc);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, pc_buf, pc.req_xfer)) {
- printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
- tape->name);
- return;
- }
- memcpy(vendor_id, &pc_buf[8], 8);
- memcpy(product_id, &pc_buf[16], 16);
- memcpy(fw_rev, &pc_buf[32], 4);
-
- ide_fixstring(vendor_id, 8, 0);
- ide_fixstring(product_id, 16, 0);
- ide_fixstring(fw_rev, 4, 0);
-
- printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n",
- drive->name, tape->name, vendor_id, product_id, fw_rev);
-}
-
-/*
- * Ask the tape about its various parameters. In particular, we will adjust our
- * data transfer buffer size to the recommended value as returned by the tape.
- */
-static void idetape_get_mode_sense_results(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- struct ide_atapi_pc pc;
- u8 buf[24], *caps;
- u8 speed, max_speed;
-
- idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
- if (ide_queue_pc_tail(drive, tape->disk, &pc, buf, pc.req_xfer)) {
- printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
- " some default values\n");
- tape->blk_size = 512;
- put_unaligned(52, (u16 *)&tape->caps[12]);
- put_unaligned(540, (u16 *)&tape->caps[14]);
- put_unaligned(6*52, (u16 *)&tape->caps[16]);
- return;
- }
- caps = buf + 4 + buf[3];
-
- /* convert to host order and save for later use */
- speed = be16_to_cpup((__be16 *)&caps[14]);
- max_speed = be16_to_cpup((__be16 *)&caps[8]);
-
- *(u16 *)&caps[8] = max_speed;
- *(u16 *)&caps[12] = be16_to_cpup((__be16 *)&caps[12]);
- *(u16 *)&caps[14] = speed;
- *(u16 *)&caps[16] = be16_to_cpup((__be16 *)&caps[16]);
-
- if (!speed) {
- printk(KERN_INFO "ide-tape: %s: invalid tape speed "
- "(assuming 650KB/sec)\n", drive->name);
- *(u16 *)&caps[14] = 650;
- }
- if (!max_speed) {
- printk(KERN_INFO "ide-tape: %s: invalid max_speed "
- "(assuming 650KB/sec)\n", drive->name);
- *(u16 *)&caps[8] = 650;
- }
-
- memcpy(&tape->caps, caps, 20);
-
- /* device lacks locking support according to capabilities page */
- if ((caps[6] & 1) == 0)
- drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
-
- if (caps[7] & 0x02)
- tape->blk_size = 512;
- else if (caps[7] & 0x04)
- tape->blk_size = 1024;
-}
-
-#ifdef CONFIG_IDE_PROC_FS
-#define ide_tape_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- idetape_tape_t *tape = drive->driver_data; \
- return tape->field; \
-}
-
-#define ide_tape_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- idetape_tape_t *tape = drive->driver_data; \
- tape->field = arg; \
- return 0; \
-}
-
-#define ide_tape_devset_rw_field(_name, _field) \
-ide_tape_devset_get(_name, _field) \
-ide_tape_devset_set(_name, _field) \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_tape_devset_r_field(_name, _field) \
-ide_tape_devset_get(_name, _field) \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-static int mulf_tdsc(ide_drive_t *drive) { return 1000; }
-static int divf_tdsc(ide_drive_t *drive) { return HZ; }
-static int divf_buffer(ide_drive_t *drive) { return 2; }
-static int divf_buffer_size(ide_drive_t *drive) { return 1024; }
-
-ide_devset_rw_flag(dsc_overlap, IDE_DFLAG_DSC_OVERLAP);
-
-ide_tape_devset_rw_field(tdsc, best_dsc_rw_freq);
-
-ide_tape_devset_r_field(avg_speed, avg_speed);
-ide_tape_devset_r_field(speed, caps[14]);
-ide_tape_devset_r_field(buffer, caps[16]);
-ide_tape_devset_r_field(buffer_size, buffer_size);
-
-static const struct ide_proc_devset idetape_settings[] = {
- __IDE_PROC_DEVSET(avg_speed, 0, 0xffff, NULL, NULL),
- __IDE_PROC_DEVSET(buffer, 0, 0xffff, NULL, divf_buffer),
- __IDE_PROC_DEVSET(buffer_size, 0, 0xffff, NULL, divf_buffer_size),
- __IDE_PROC_DEVSET(dsc_overlap, 0, 1, NULL, NULL),
- __IDE_PROC_DEVSET(speed, 0, 0xffff, NULL, NULL),
- __IDE_PROC_DEVSET(tdsc, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX,
- mulf_tdsc, divf_tdsc),
- { NULL },
-};
-#endif
-
-/*
- * The function below is called to:
- *
- * 1. Initialize our various state variables.
- * 2. Ask the tape for its capabilities.
- * 3. Allocate a buffer which will be used for data transfer. The buffer size
- * is chosen based on the recommendation which we received in step 2.
- *
- * Note that at this point ide.c already assigned us an irq, so that we can
- * queue requests here and wait for their completion.
- */
-static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
-{
- unsigned long t;
- int speed;
- u16 *ctl = (u16 *)&tape->caps[12];
-
- ide_debug_log(IDE_DBG_FUNC, "minor: %d", minor);
-
- drive->pc_callback = ide_tape_callback;
-
- drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
-
- if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
- printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
- tape->name);
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
- }
-
- /* Seagate Travan drives do not support DSC overlap. */
- if (strstr((char *)&drive->id[ATA_ID_PROD], "Seagate STT3401"))
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-
- tape->minor = minor;
- tape->name[0] = 'h';
- tape->name[1] = 't';
- tape->name[2] = '0' + minor;
- tape->chrdev_dir = IDETAPE_DIR_NONE;
-
- idetape_get_inquiry_results(drive);
- idetape_get_mode_sense_results(drive);
- ide_tape_get_bsize_from_bdesc(drive);
- tape->user_bs_factor = 1;
- tape->buffer_size = *ctl * tape->blk_size;
- while (tape->buffer_size > 0xffff) {
- printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
- *ctl /= 2;
- tape->buffer_size = *ctl * tape->blk_size;
- }
-
- /* select the "best" DSC read/write polling freq */
- speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
-
- t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
-
- /*
- * Ensure that the number we got makes sense; limit it within
- * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
- */
- tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
- IDETAPE_DSC_RW_MAX);
- printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
- "%ums tDSC%s\n",
- drive->name, tape->name, *(u16 *)&tape->caps[14],
- (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
- tape->buffer_size / 1024,
- jiffies_to_msecs(tape->best_dsc_rw_freq),
- (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
-
- ide_proc_register_driver(drive, tape->driver);
-}
-
-static void ide_tape_remove(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- ide_proc_unregister_driver(drive, tape->driver);
- device_del(&tape->dev);
-
- mutex_lock(&idetape_ref_mutex);
- put_device(&tape->dev);
- mutex_unlock(&idetape_ref_mutex);
-}
-
-static void ide_tape_release(struct device *dev)
-{
- struct ide_tape_obj *tape = to_ide_drv(dev, ide_tape_obj);
- ide_drive_t *drive = tape->drive;
- struct gendisk *g = tape->disk;
-
- BUG_ON(tape->valid);
-
- drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
- drive->driver_data = NULL;
- device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
- device_destroy(idetape_sysfs_class,
- MKDEV(IDETAPE_MAJOR, tape->minor + 128));
- idetape_devs[tape->minor] = NULL;
- g->private_data = NULL;
- put_disk(g);
- kfree(tape);
-}
-
-#ifdef CONFIG_IDE_PROC_FS
-static int idetape_name_proc_show(struct seq_file *m, void *v)
-{
- ide_drive_t *drive = (ide_drive_t *) m->private;
- idetape_tape_t *tape = drive->driver_data;
-
- seq_printf(m, "%s\n", tape->name);
- return 0;
-}
-
-static ide_proc_entry_t idetape_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, ide_capacity_proc_show },
- { "name", S_IFREG|S_IRUGO, idetape_name_proc_show },
- {}
-};
-
-static ide_proc_entry_t *ide_tape_proc_entries(ide_drive_t *drive)
-{
- return idetape_proc;
-}
-
-static const struct ide_proc_devset *ide_tape_proc_devsets(ide_drive_t *drive)
-{
- return idetape_settings;
-}
-#endif
-
-static int ide_tape_probe(ide_drive_t *);
-
-static struct ide_driver idetape_driver = {
- .gen_driver = {
- .owner = THIS_MODULE,
- .name = "ide-tape",
- .bus = &ide_bus_type,
- },
- .probe = ide_tape_probe,
- .remove = ide_tape_remove,
- .version = IDETAPE_VERSION,
- .do_request = idetape_do_request,
-#ifdef CONFIG_IDE_PROC_FS
- .proc_entries = ide_tape_proc_entries,
- .proc_devsets = ide_tape_proc_devsets,
-#endif
-};
-
-/* Our character device supporting functions, passed to register_chrdev. */
-static const struct file_operations idetape_fops = {
- .owner = THIS_MODULE,
- .read = idetape_chrdev_read,
- .write = idetape_chrdev_write,
- .unlocked_ioctl = idetape_chrdev_ioctl,
- .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
- idetape_chrdev_compat_ioctl : NULL,
- .open = idetape_chrdev_open,
- .release = idetape_chrdev_release,
- .llseek = noop_llseek,
-};
-
-static int idetape_open(struct block_device *bdev, fmode_t mode)
-{
- struct ide_tape_obj *tape;
-
- mutex_lock(&ide_tape_mutex);
- tape = ide_tape_get(bdev->bd_disk, false, 0);
- mutex_unlock(&ide_tape_mutex);
-
- if (!tape)
- return -ENXIO;
-
- return 0;
-}
-
-static void idetape_release(struct gendisk *disk, fmode_t mode)
-{
- struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
-
- mutex_lock(&ide_tape_mutex);
- ide_tape_put(tape);
- mutex_unlock(&ide_tape_mutex);
-}
-
-static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
- ide_drive_t *drive = tape->drive;
- int err;
-
- mutex_lock(&ide_tape_mutex);
- err = generic_ide_ioctl(drive, bdev, cmd, arg);
- if (err == -EINVAL)
- err = idetape_blkdev_ioctl(drive, cmd, arg);
- mutex_unlock(&ide_tape_mutex);
-
- return err;
-}
-
-static int idetape_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- if (cmd == 0x0340 || cmd == 0x350)
- arg = (unsigned long)compat_ptr(arg);
-
- return idetape_ioctl(bdev, mode, cmd, arg);
-}
-
-static const struct block_device_operations idetape_block_ops = {
- .owner = THIS_MODULE,
- .open = idetape_open,
- .release = idetape_release,
- .ioctl = idetape_ioctl,
- .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
- idetape_compat_ioctl : NULL,
-};
-
-static int ide_tape_probe(ide_drive_t *drive)
-{
- idetape_tape_t *tape;
- struct gendisk *g;
- int minor;
-
- ide_debug_log(IDE_DBG_FUNC, "enter");
-
- if (!strstr(DRV_NAME, drive->driver_req))
- goto failed;
-
- if (drive->media != ide_tape)
- goto failed;
-
- if ((drive->dev_flags & IDE_DFLAG_ID_READ) &&
- ide_check_atapi_device(drive, DRV_NAME) == 0) {
- printk(KERN_ERR "ide-tape: %s: not supported by this version of"
- " the driver\n", drive->name);
- goto failed;
- }
- tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
- if (tape == NULL) {
- printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
- drive->name);
- goto failed;
- }
-
- g = alloc_disk(1 << PARTN_BITS);
- if (!g)
- goto out_free_tape;
-
- ide_init_disk(g, drive);
-
- tape->dev.parent = &drive->gendev;
- tape->dev.release = ide_tape_release;
- dev_set_name(&tape->dev, "%s", dev_name(&drive->gendev));
-
- if (device_register(&tape->dev))
- goto out_free_disk;
-
- tape->drive = drive;
- tape->driver = &idetape_driver;
- tape->disk = g;
-
- g->private_data = &tape->driver;
-
- drive->driver_data = tape;
-
- mutex_lock(&idetape_ref_mutex);
- for (minor = 0; idetape_devs[minor]; minor++)
- ;
- idetape_devs[minor] = tape;
- mutex_unlock(&idetape_ref_mutex);
-
- idetape_setup(drive, tape, minor);
-
- device_create(idetape_sysfs_class, &drive->gendev,
- MKDEV(IDETAPE_MAJOR, minor), NULL, "%s", tape->name);
- device_create(idetape_sysfs_class, &drive->gendev,
- MKDEV(IDETAPE_MAJOR, minor + 128), NULL,
- "n%s", tape->name);
-
- g->fops = &idetape_block_ops;
-
- return 0;
-
-out_free_disk:
- put_disk(g);
-out_free_tape:
- kfree(tape);
-failed:
- return -ENODEV;
-}
-
-static void __exit idetape_exit(void)
-{
- driver_unregister(&idetape_driver.gen_driver);
- class_destroy(idetape_sysfs_class);
- unregister_chrdev(IDETAPE_MAJOR, "ht");
-}
-
-static int __init idetape_init(void)
-{
- int error = 1;
- idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
- if (IS_ERR(idetape_sysfs_class)) {
- idetape_sysfs_class = NULL;
- printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
- error = -EBUSY;
- goto out;
- }
-
- if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
- printk(KERN_ERR "ide-tape: Failed to register chrdev"
- " interface\n");
- error = -EBUSY;
- goto out_free_class;
- }
-
- error = driver_register(&idetape_driver.gen_driver);
- if (error)
- goto out_free_chrdev;
-
- return 0;
-
-out_free_chrdev:
- unregister_chrdev(IDETAPE_MAJOR, "ht");
-out_free_class:
- class_destroy(idetape_sysfs_class);
-out:
- return error;
-}
-
-MODULE_ALIAS("ide:*m-tape*");
-module_init(idetape_init);
-module_exit(idetape_exit);
-MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
-MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
deleted file mode 100644
index 6665fc4724b9..000000000000
--- a/drivers/ide/ide-taskfile.c
+++ /dev/null
@@ -1,668 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org>
- * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2001-2002 Klaus Smolin
- * IBM Storage Technology Division
- * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz
- *
- * The big, the bad and the ugly.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/ide.h>
-#include <linux/nmi.h>
-#include <linux/scatterlist.h>
-#include <linux/uaccess.h>
-
-#include <asm/io.h>
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
-
- /* Be sure we're looking at the low order bytes */
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
- tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);
-
- if (cmd->tf_flags & IDE_TFLAG_LBA48) {
- tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);
-
- tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
- }
-}
-
-void ide_tf_dump(const char *s, struct ide_cmd *cmd)
-{
-#ifdef DEBUG
- printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
- "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
- s, cmd->tf.feature, cmd->tf.nsect,
- cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
- cmd->tf.device, cmd->tf.command);
- printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
- s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
-#endif
-}
-
-int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
-{
- struct ide_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.tf.nsect = 0x01;
- if (drive->media == ide_disk)
- cmd.tf.command = ATA_CMD_ID_ATA;
- else
- cmd.tf.command = ATA_CMD_ID_ATAPI;
- cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
- cmd.protocol = ATA_PROT_PIO;
-
- return ide_raw_taskfile(drive, &cmd, buf, 1);
-}
-
-static ide_startstop_t task_no_data_intr(ide_drive_t *);
-static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
-static ide_startstop_t task_pio_intr(ide_drive_t *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct ide_taskfile *tf = &cmd->tf;
- ide_handler_t *handler = NULL;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- const struct ide_dma_ops *dma_ops = hwif->dma_ops;
-
- if (orig_cmd->protocol == ATA_PROT_PIO &&
- (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
- drive->mult_count == 0) {
- pr_err("%s: multimode not set!\n", drive->name);
- return ide_stopped;
- }
-
- if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
- orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;
-
- memcpy(cmd, orig_cmd, sizeof(*cmd));
-
- if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
- ide_tf_dump(drive->name, cmd);
- tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
- if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
- u8 data[2] = { cmd->tf.data, cmd->hob.data };
-
- tp_ops->output_data(drive, cmd, data, 2);
- }
-
- if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
- u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
- 0xE0 : 0xEF;
-
- if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
- cmd->tf.device &= HIHI;
- cmd->tf.device |= drive->select;
- }
-
- tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
- tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf);
- }
-
- switch (cmd->protocol) {
- case ATA_PROT_PIO:
- if (cmd->tf_flags & IDE_TFLAG_WRITE) {
- tp_ops->exec_command(hwif, tf->command);
- ndelay(400); /* FIXME */
- return pre_task_out_intr(drive, cmd);
- }
- handler = task_pio_intr;
- fallthrough;
- case ATA_PROT_NODATA:
- if (handler == NULL)
- handler = task_no_data_intr;
- ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE);
- return ide_started;
- case ATA_PROT_DMA:
- if (ide_dma_prepare(drive, cmd))
- return ide_stopped;
- hwif->expiry = dma_ops->dma_timer_expiry;
- ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
- dma_ops->dma_start(drive);
- fallthrough;
- default:
- return ide_started;
- }
-}
-EXPORT_SYMBOL_GPL(do_rw_taskfile);
-
-static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
- struct ide_taskfile *tf = &cmd->tf;
- int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
- int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
- u8 stat;
-
- local_irq_enable_in_hardirq();
-
- while (1) {
- stat = hwif->tp_ops->read_status(hwif);
- if ((stat & ATA_BUSY) == 0 || retries-- == 0)
- break;
- udelay(10);
- };
-
- if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
- if (custom && tf->command == ATA_CMD_SET_MULTI) {
- drive->mult_req = drive->mult_count = 0;
- drive->special_flags |= IDE_SFLAG_RECALIBRATE;
- (void)ide_dump_status(drive, __func__, stat);
- return ide_stopped;
- } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
- if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
- ide_set_handler(drive, &task_no_data_intr,
- WAIT_WORSTCASE);
- return ide_started;
- }
- }
- return ide_error(drive, "task_no_data_intr", stat);
- }
-
- if (custom && tf->command == ATA_CMD_SET_MULTI)
- drive->mult_count = drive->mult_req;
-
- if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
- tf->command == ATA_CMD_CHK_POWER) {
- struct request *rq = hwif->rq;
-
- if (ata_pm_request(rq))
- ide_complete_pm_rq(drive, rq);
- else
- ide_finish_cmd(drive, cmd, stat);
- }
-
- return ide_stopped;
-}
-
-static u8 wait_drive_not_busy(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- int retries;
- u8 stat;
-
- /*
- * Last sector was transferred, wait until device is ready. This can
- * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
- */
- for (retries = 0; retries < 1000; retries++) {
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY)
- udelay(10);
- else
- break;
- }
-
- if (stat & ATA_BUSY)
- pr_err("%s: drive still BUSY!\n", drive->name);
-
- return stat;
-}
-
-void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
- unsigned int write, unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct scatterlist *sg = hwif->sg_table;
- struct scatterlist *cursg = cmd->cursg;
- struct page *page;
- unsigned int offset;
- u8 *buf;
-
- if (cursg == NULL)
- cursg = cmd->cursg = sg;
-
- while (len) {
- unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
-
- page = sg_page(cursg);
- offset = cursg->offset + cmd->cursg_ofs;
-
- /* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
- offset %= PAGE_SIZE;
-
- nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));
-
- buf = kmap_atomic(page) + offset;
-
- cmd->nleft -= nr_bytes;
- cmd->cursg_ofs += nr_bytes;
-
- if (cmd->cursg_ofs == cursg->length) {
- cursg = cmd->cursg = sg_next(cmd->cursg);
- cmd->cursg_ofs = 0;
- }
-
- /* do the actual data transfer */
- if (write)
- hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
- else
- hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
-
- kunmap_atomic(buf);
-
- len -= nr_bytes;
- }
-}
-EXPORT_SYMBOL_GPL(ide_pio_bytes);
-
-static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
- unsigned int write)
-{
- unsigned int nr_bytes;
-
- u8 saved_io_32bit = drive->io_32bit;
-
- if (cmd->tf_flags & IDE_TFLAG_FS)
- scsi_req(cmd->rq)->result = 0;
-
- if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
- drive->io_32bit = 0;
-
- touch_softlockup_watchdog();
-
- if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
- nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
- else
- nr_bytes = SECTOR_SIZE;
-
- ide_pio_bytes(drive, cmd, write, nr_bytes);
-
- drive->io_32bit = saved_io_32bit;
-}
-
-static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (cmd->tf_flags & IDE_TFLAG_FS) {
- int nr_bytes = cmd->nbytes - cmd->nleft;
-
- if (cmd->protocol == ATA_PROT_PIO &&
- ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
- if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
- nr_bytes -= drive->mult_count << 9;
- else
- nr_bytes -= SECTOR_SIZE;
- }
-
- if (nr_bytes > 0)
- ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
- }
-}
-
-void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
-{
- struct request *rq = drive->hwif->rq;
- u8 err = ide_read_error(drive), nsect = cmd->tf.nsect;
- u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER);
-
- ide_complete_cmd(drive, cmd, stat, err);
- scsi_req(rq)->result = err;
-
- if (err == 0 && set_xfer) {
- ide_set_xfer_rate(drive, nsect);
- ide_driveid_update(drive);
- }
-
- ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
-}
-
-/*
- * Handler for command with PIO data phase.
- */
-static ide_startstop_t task_pio_intr(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &drive->hwif->cmd;
- u8 stat = hwif->tp_ops->read_status(hwif);
- u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
-
- if (write == 0) {
- /* Error? */
- if (stat & ATA_ERR)
- goto out_err;
-
- /* Didn't want any data? Odd. */
- if ((stat & ATA_DRQ) == 0) {
- /* Command all done? */
- if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
- goto out_end;
-
- /* Assume it was a spurious irq */
- goto out_wait;
- }
- } else {
- if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
- goto out_err;
-
- /* Deal with unexpected ATA data phase. */
- if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
- goto out_err;
- }
-
- if (write && cmd->nleft == 0)
- goto out_end;
-
- /* Still data left to transfer. */
- ide_pio_datablock(drive, cmd, write);
-
- /* Are we done? Check status and finish transfer. */
- if (write == 0 && cmd->nleft == 0) {
- stat = wait_drive_not_busy(drive);
- if (!OK_STAT(stat, 0, BAD_STAT))
- goto out_err;
-
- goto out_end;
- }
-out_wait:
- /* Still data left to transfer. */
- ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
- return ide_started;
-out_end:
- if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
- ide_finish_cmd(drive, cmd, stat);
- else
- ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
- return ide_stopped;
-out_err:
- ide_error_cmd(drive, cmd);
- return ide_error(drive, __func__, stat);
-}
-
-static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
- struct ide_cmd *cmd)
-{
- ide_startstop_t startstop;
-
- if (ide_wait_stat(&startstop, drive, ATA_DRQ,
- drive->bad_wstat, WAIT_DRQ)) {
- pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name,
- (cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
- (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
- return startstop;
- }
-
- if (!force_irqthreads && (drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- local_irq_disable();
-
- ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-
- ide_pio_datablock(drive, cmd, 1);
-
- return ide_started;
-}
-
-int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
- u16 nsect)
-{
- struct request *rq;
- int error;
-
- rq = blk_get_request(drive->queue,
- (cmd->tf_flags & IDE_TFLAG_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- ide_req(rq)->type = ATA_PRIV_TASKFILE;
-
- /*
- * (ks) We currently transfer only whole sectors.
- * This is sufficient for now, but it would be great
- * if we could find a solution to transfer any size,
- * to support special commands like READ LONG.
- */
- if (nsect) {
- error = blk_rq_map_kern(drive->queue, rq, buf,
- nsect * SECTOR_SIZE, GFP_NOIO);
- if (error)
- goto put_req;
- }
-
- ide_req(rq)->special = cmd;
- cmd->rq = rq;
-
- blk_execute_rq(NULL, rq, 0);
- error = scsi_req(rq)->result ? -EIO : 0;
-put_req:
- blk_put_request(rq);
- return error;
-}
-EXPORT_SYMBOL(ide_raw_taskfile);
-
-int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- cmd->protocol = ATA_PROT_NODATA;
-
- return ide_raw_taskfile(drive, cmd, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
-
-#ifdef CONFIG_IDE_TASK_IOCTL
-int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
-{
- ide_task_request_t *req_task;
- struct ide_cmd cmd;
- u8 *outbuf = NULL;
- u8 *inbuf = NULL;
- u8 *data_buf = NULL;
- int err = 0;
- int tasksize = sizeof(struct ide_task_request_s);
- unsigned int taskin = 0;
- unsigned int taskout = 0;
- u16 nsect = 0;
- char __user *buf = (char __user *)arg;
-
- req_task = memdup_user(buf, tasksize);
- if (IS_ERR(req_task))
- return PTR_ERR(req_task);
-
- taskout = req_task->out_size;
- taskin = req_task->in_size;
-
- if (taskin > 65536 || taskout > 65536) {
- err = -EINVAL;
- goto abort;
- }
-
- if (taskout) {
- int outtotal = tasksize;
- outbuf = kzalloc(taskout, GFP_KERNEL);
- if (outbuf == NULL) {
- err = -ENOMEM;
- goto abort;
- }
- if (copy_from_user(outbuf, buf + outtotal, taskout)) {
- err = -EFAULT;
- goto abort;
- }
- }
-
- if (taskin) {
- int intotal = tasksize + taskout;
- inbuf = kzalloc(taskin, GFP_KERNEL);
- if (inbuf == NULL) {
- err = -ENOMEM;
- goto abort;
- }
- if (copy_from_user(inbuf, buf + intotal, taskin)) {
- err = -EFAULT;
- goto abort;
- }
- }
-
- memset(&cmd, 0, sizeof(cmd));
-
- memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
- memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
-
- cmd.valid.out.tf = IDE_VALID_DEVICE;
- cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
- cmd.tf_flags = IDE_TFLAG_IO_16BIT;
-
- if (drive->dev_flags & IDE_DFLAG_LBA48) {
- cmd.tf_flags |= IDE_TFLAG_LBA48;
- cmd.valid.in.hob = IDE_VALID_IN_HOB;
- }
-
- if (req_task->out_flags.all) {
- cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;
-
- if (req_task->out_flags.b.data)
- cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;
-
- if (req_task->out_flags.b.nsector_hob)
- cmd.valid.out.hob |= IDE_VALID_NSECT;
- if (req_task->out_flags.b.sector_hob)
- cmd.valid.out.hob |= IDE_VALID_LBAL;
- if (req_task->out_flags.b.lcyl_hob)
- cmd.valid.out.hob |= IDE_VALID_LBAM;
- if (req_task->out_flags.b.hcyl_hob)
- cmd.valid.out.hob |= IDE_VALID_LBAH;
-
- if (req_task->out_flags.b.error_feature)
- cmd.valid.out.tf |= IDE_VALID_FEATURE;
- if (req_task->out_flags.b.nsector)
- cmd.valid.out.tf |= IDE_VALID_NSECT;
- if (req_task->out_flags.b.sector)
- cmd.valid.out.tf |= IDE_VALID_LBAL;
- if (req_task->out_flags.b.lcyl)
- cmd.valid.out.tf |= IDE_VALID_LBAM;
- if (req_task->out_flags.b.hcyl)
- cmd.valid.out.tf |= IDE_VALID_LBAH;
- } else {
- cmd.valid.out.tf |= IDE_VALID_OUT_TF;
- if (cmd.tf_flags & IDE_TFLAG_LBA48)
- cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
- }
-
- if (req_task->in_flags.b.data)
- cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;
-
- if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
- /* fixup data phase if needed */
- if (req_task->data_phase == TASKFILE_IN_DMAQ ||
- req_task->data_phase == TASKFILE_IN_DMA)
- cmd.tf_flags |= IDE_TFLAG_WRITE;
- }
-
- cmd.protocol = ATA_PROT_DMA;
-
- switch (req_task->data_phase) {
- case TASKFILE_MULTI_OUT:
- if (!drive->mult_count) {
- /* (hs): give up if multcount is not set */
- pr_err("%s: %s Multimode Write multcount is not set\n",
- drive->name, __func__);
- err = -EPERM;
- goto abort;
- }
- cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- fallthrough;
- case TASKFILE_OUT:
- cmd.protocol = ATA_PROT_PIO;
- fallthrough;
- case TASKFILE_OUT_DMAQ:
- case TASKFILE_OUT_DMA:
- cmd.tf_flags |= IDE_TFLAG_WRITE;
- nsect = taskout / SECTOR_SIZE;
- data_buf = outbuf;
- break;
- case TASKFILE_MULTI_IN:
- if (!drive->mult_count) {
- /* (hs): give up if multcount is not set */
- pr_err("%s: %s Multimode Read multcount is not set\n",
- drive->name, __func__);
- err = -EPERM;
- goto abort;
- }
- cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
- fallthrough;
- case TASKFILE_IN:
- cmd.protocol = ATA_PROT_PIO;
- fallthrough;
- case TASKFILE_IN_DMAQ:
- case TASKFILE_IN_DMA:
- nsect = taskin / SECTOR_SIZE;
- data_buf = inbuf;
- break;
- case TASKFILE_NO_DATA:
- cmd.protocol = ATA_PROT_NODATA;
- break;
- default:
- err = -EFAULT;
- goto abort;
- }
-
- if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
- nsect = 0;
- else if (!nsect) {
- nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;
-
- if (!nsect) {
- pr_err("%s: in/out command without data\n",
- drive->name);
- err = -EFAULT;
- goto abort;
- }
- }
-
- err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);
-
- memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
- memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE);
-
- if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
- req_task->in_flags.all == 0) {
- req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
- if (drive->dev_flags & IDE_DFLAG_LBA48)
- req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
- }
-
- if (copy_to_user(buf, req_task, tasksize)) {
- err = -EFAULT;
- goto abort;
- }
- if (taskout) {
- int outtotal = tasksize;
- if (copy_to_user(buf + outtotal, outbuf, taskout)) {
- err = -EFAULT;
- goto abort;
- }
- }
- if (taskin) {
- int intotal = tasksize + taskout;
- if (copy_to_user(buf + intotal, inbuf, taskin)) {
- err = -EFAULT;
- goto abort;
- }
- }
-abort:
- kfree(req_task);
- kfree(outbuf);
- kfree(inbuf);
-
- return err;
-}
-#endif
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
deleted file mode 100644
index cfe78df74b7d..000000000000
--- a/drivers/ide/ide-timings.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 1999-2001 Vojtech Pavlik
- * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
- */
-
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-/*
- * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
- * These were taken from ATA/ATAPI-6 standard, rev 0a, except
- * for PIO 5, which is a nonstandard extension and UDMA6, which
- * is currently supported only by Maxtor drives.
- */
-
-static struct ide_timing ide_timing[] = {
-
- { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
- { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
- { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
- { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
-
- { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
- { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
- { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
-
- { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
- { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
- { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
- { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
- { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
-
- { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
- { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
- { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
-
- { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
- { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
- { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
- { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
-
- { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
- { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
- { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
-
- { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 },
-
- { 0xff }
-};
-
-struct ide_timing *ide_timing_find_mode(u8 speed)
-{
- struct ide_timing *t;
-
- for (t = ide_timing; t->mode != speed; t++)
- if (t->mode == 0xff)
- return NULL;
- return t;
-}
-EXPORT_SYMBOL_GPL(ide_timing_find_mode);
-
-u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
-{
- u16 *id = drive->id;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- u16 cycle = 0;
-
- if (id[ATA_ID_FIELD_VALID] & 2) {
- if (ata_id_has_iordy(drive->id))
- cycle = id[ATA_ID_EIDE_PIO_IORDY];
- else
- cycle = id[ATA_ID_EIDE_PIO];
-
- /* conservative "downgrade" for all pre-ATA2 drives */
- if (pio < 3 && cycle < t->cycle)
- cycle = 0; /* use standard timing */
-
- /* Use the standard timing for the CF specific modes too */
- if (pio > 4 && ata_id_is_cfa(id))
- cycle = 0;
- }
-
- return cycle ? cycle : t->cycle;
-}
-EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
-
-#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
-#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0)
-
-static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
- int T, int UT)
-{
- q->setup = EZ(t->setup, T);
- q->act8b = EZ(t->act8b, T);
- q->rec8b = EZ(t->rec8b, T);
- q->cyc8b = EZ(t->cyc8b, T);
- q->active = EZ(t->active, T);
- q->recover = EZ(t->recover, T);
- q->cycle = EZ(t->cycle, T);
- q->udma = EZ(t->udma, UT);
-}
-
-void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
- struct ide_timing *m, unsigned int what)
-{
- if (what & IDE_TIMING_SETUP)
- m->setup = max(a->setup, b->setup);
- if (what & IDE_TIMING_ACT8B)
- m->act8b = max(a->act8b, b->act8b);
- if (what & IDE_TIMING_REC8B)
- m->rec8b = max(a->rec8b, b->rec8b);
- if (what & IDE_TIMING_CYC8B)
- m->cyc8b = max(a->cyc8b, b->cyc8b);
- if (what & IDE_TIMING_ACTIVE)
- m->active = max(a->active, b->active);
- if (what & IDE_TIMING_RECOVER)
- m->recover = max(a->recover, b->recover);
- if (what & IDE_TIMING_CYCLE)
- m->cycle = max(a->cycle, b->cycle);
- if (what & IDE_TIMING_UDMA)
- m->udma = max(a->udma, b->udma);
-}
-EXPORT_SYMBOL_GPL(ide_timing_merge);
-
-int ide_timing_compute(ide_drive_t *drive, u8 speed,
- struct ide_timing *t, int T, int UT)
-{
- u16 *id = drive->id;
- struct ide_timing *s, p;
-
- /*
- * Find the mode.
- */
- s = ide_timing_find_mode(speed);
- if (s == NULL)
- return -EINVAL;
-
- /*
- * Copy the timing from the table.
- */
- *t = *s;
-
- /*
- * If the drive is an EIDE drive, it can tell us it needs extended
- * PIO/MWDMA cycle timing.
- */
- if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
- memset(&p, 0, sizeof(p));
-
- if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
- if (speed <= XFER_PIO_2)
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
- else if ((speed <= XFER_PIO_4) ||
- (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
- } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- p.cycle = id[ATA_ID_EIDE_DMA_MIN];
-
- ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
- }
-
- /*
- * Convert the timing to bus clock counts.
- */
- ide_timing_quantize(t, t, T, UT);
-
- /*
- * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
- * S.M.A.R.T and some other commands. We have to ensure that the
- * DMA cycle timing is no faster than the current PIO timing.
- */
- if (speed >= XFER_SW_DMA_0) {
- ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
- ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
- }
-
- /*
- * Lengthen active & recovery time so that cycle time is correct.
- */
- if (t->act8b + t->rec8b < t->cyc8b) {
- t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
- t->rec8b = t->cyc8b - t->act8b;
- }
-
- if (t->active + t->recover < t->cycle) {
- t->active += (t->cycle - (t->active + t->recover)) / 2;
- t->recover = t->cycle - t->active;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_timing_compute);
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
deleted file mode 100644
index 0b9709b489b7..000000000000
--- a/drivers/ide/ide-xfer-mode.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-#include <linux/bitops.h>
-
-static const char *udma_str[] =
- { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
- "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
-static const char *mwdma_str[] =
- { "MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4" };
-static const char *swdma_str[] =
- { "SWDMA0", "SWDMA1", "SWDMA2" };
-static const char *pio_str[] =
- { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6" };
-
-/**
- * ide_xfer_verbose - return IDE mode names
- * @mode: transfer mode
- *
- * Returns a constant string giving the name of the mode
- * requested.
- */
-
-const char *ide_xfer_verbose(u8 mode)
-{
- const char *s;
- u8 i = mode & 0xf;
-
- if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7)
- s = udma_str[i];
- else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_4)
- s = mwdma_str[i];
- else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2)
- s = swdma_str[i];
- else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_6)
- s = pio_str[i & 0x7];
- else if (mode == XFER_PIO_SLOW)
- s = "PIO SLOW";
- else
- s = "XFER ERROR";
-
- return s;
-}
-EXPORT_SYMBOL(ide_xfer_verbose);
-
-/**
- * ide_get_best_pio_mode - get PIO mode from drive
- * @drive: drive to consider
- * @mode_wanted: preferred mode
- * @max_mode: highest allowed mode
- *
- * This routine returns the recommended PIO settings for a given drive,
- * based on the drive->id information and the ide_pio_blacklist[].
- *
- * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
- * This is used by most chipset support modules when "auto-tuning".
- */
-
-static u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
-{
- u16 *id = drive->id;
- int pio_mode = -1, overridden = 0;
-
- if (mode_wanted != 255)
- return min_t(u8, mode_wanted, max_mode);
-
- if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0)
- pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]);
-
- if (pio_mode != -1) {
- printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
- } else {
- pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8;
- if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
- pio_mode = 2;
- overridden = 1;
- }
-
- if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */
- if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 7))
- pio_mode = 4 + min_t(int, 2,
- id[ATA_ID_CFA_MODES] & 7);
- else if (ata_id_has_iordy(id)) {
- if (id[ATA_ID_PIO_MODES] & 7) {
- overridden = 0;
- if (id[ATA_ID_PIO_MODES] & 4)
- pio_mode = 5;
- else if (id[ATA_ID_PIO_MODES] & 2)
- pio_mode = 4;
- else
- pio_mode = 3;
- }
- }
- }
-
- if (overridden)
- printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
- drive->name);
- }
-
- if (pio_mode > max_mode)
- pio_mode = max_mode;
-
- return pio_mode;
-}
-
-int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio)
-{
- /*
- * IORDY may lead to controller lock up on certain controllers
- * if the port is not occupied.
- */
- if (pio == 0 && (drive->hwif->port_flags & IDE_PFLAG_PROBING))
- return 0;
- return ata_id_pio_need_iordy(drive->id, pio);
-}
-EXPORT_SYMBOL_GPL(ide_pio_need_iordy);
-
-int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 0;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL)
- return -1;
-
- /*
- * TODO: temporary hack for some legacy host drivers that didn't
- * set transfer mode on the device in ->set_pio_mode method...
- */
- if (port_ops->set_dma_mode == NULL) {
- drive->pio_mode = mode;
- port_ops->set_pio_mode(hwif, drive);
- return 0;
- }
-
- if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
- if (ide_config_drive_speed(drive, mode))
- return -1;
- drive->pio_mode = mode;
- port_ops->set_pio_mode(hwif, drive);
- return 0;
- } else {
- drive->pio_mode = mode;
- port_ops->set_pio_mode(hwif, drive);
- return ide_config_drive_speed(drive, mode);
- }
-}
-
-int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 0;
-
- if (port_ops == NULL || port_ops->set_dma_mode == NULL)
- return -1;
-
- if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
- if (ide_config_drive_speed(drive, mode))
- return -1;
- drive->dma_mode = mode;
- port_ops->set_dma_mode(hwif, drive);
- return 0;
- } else {
- drive->dma_mode = mode;
- port_ops->set_dma_mode(hwif, drive);
- return ide_config_drive_speed(drive, mode);
- }
-}
-EXPORT_SYMBOL_GPL(ide_set_dma_mode);
-
-/* req_pio == "255" for auto-tune */
-void ide_set_pio(ide_drive_t *drive, u8 req_pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- u8 host_pio, pio;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return;
-
- BUG_ON(hwif->pio_mask == 0x00);
-
- host_pio = fls(hwif->pio_mask) - 1;
-
- pio = ide_get_best_pio_mode(drive, req_pio, host_pio);
-
- /*
- * TODO:
- * - report device max PIO mode
- * - check req_pio != 255 against device max PIO mode
- */
- printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n",
- drive->name, host_pio, req_pio,
- req_pio == 255 ? "(auto-tune)" : "", pio);
-
- (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
-}
-EXPORT_SYMBOL_GPL(ide_set_pio);
-
-/**
- * ide_rate_filter - filter transfer mode
- * @drive: IDE device
- * @speed: desired speed
- *
- * Given the available transfer modes this function returns
- * the best available speed at or below the speed requested.
- *
- * TODO: check device PIO capabilities
- */
-
-static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 mode = ide_find_dma_mode(drive, speed);
-
- if (mode == 0) {
- if (hwif->pio_mask)
- mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0;
- else
- mode = XFER_PIO_4;
- }
-
-/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
-
- return min(speed, mode);
-}
-
-/**
- * ide_set_xfer_rate - set transfer rate
- * @drive: drive to set
- * @rate: speed to attempt to set
- *
- * General helper for setting the speed of an IDE device. This
- * function knows about user enforced limits from the configuration
- * which ->set_pio_mode/->set_dma_mode does not.
- */
-
-int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return -1;
-
- rate = ide_rate_filter(drive, rate);
-
- BUG_ON(rate < XFER_PIO_0);
-
- if (rate >= XFER_PIO_0 && rate <= XFER_PIO_6)
- return ide_set_pio_mode(drive, rate);
-
- return ide_set_dma_mode(drive, rate);
-}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
deleted file mode 100644
index 9a9c64fd1032..000000000000
--- a/drivers/ide/ide.c
+++ /dev/null
@@ -1,415 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
- */
-
-/*
- * Mostly written by Mark Lord <mlord@pobox.com>
- * and Gadi Oxman <gadio@netvision.net.il>
- * and Andre Hedrick <andre@linux-ide.org>
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This is the multiple IDE interface driver, as evolved from hd.c.
- * It supports up to MAX_HWIFS IDE interfaces, on one or more IRQs
- * (usually 14 & 15).
- * There can be up to two drives per interface, as per the ATA-2 spec.
- *
- * ...
- *
- * From hd.c:
- * |
- * | It traverses the request-list, using interrupts to jump between functions.
- * | As nearly all functions can be called within interrupts, we may not sleep.
- * | Special care is recommended. Have Fun!
- * |
- * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
- * |
- * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
- * | in the early extended-partition checks and added DM partitions.
- * |
- * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
- * |
- * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
- * | and general streamlining by Mark Lord (mlord@pobox.com).
- *
- * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
- *
- * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg)
- * Delman Lee (delman@ieee.org) ("Mr. atdisk2")
- * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
- *
- * This was a rewrite of just about everything from hd.c, though some original
- * code is still sprinkled about. Think of it as a major evolution, with
- * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/major.h>
-#include <linux/errno.h>
-#include <linux/genhd.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/hdreg.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-
-struct class *ide_port_class;
-
-/**
- * ide_device_get - get an additional reference to an ide_drive_t
- * @drive: device to get a reference to
- *
- * Gets a reference to the ide_drive_t and increments the use count of the
- * underlying LLDD module.
- */
-int ide_device_get(ide_drive_t *drive)
-{
- struct device *host_dev;
- struct module *module;
-
- if (!get_device(&drive->gendev))
- return -ENXIO;
-
- host_dev = drive->hwif->host->dev[0];
- module = host_dev ? host_dev->driver->owner : NULL;
-
- if (module && !try_module_get(module)) {
- put_device(&drive->gendev);
- return -ENXIO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_device_get);
-
-/**
- * ide_device_put - release a reference to an ide_drive_t
- * @drive: device to release a reference on
- *
- * Releases a reference to the ide_drive_t and decrements the use count of
- * the underlying LLDD module.
- */
-void ide_device_put(ide_drive_t *drive)
-{
-#ifdef CONFIG_MODULE_UNLOAD
- struct device *host_dev = drive->hwif->host->dev[0];
- struct module *module = host_dev ? host_dev->driver->owner : NULL;
-
- module_put(module);
-#endif
- put_device(&drive->gendev);
-}
-EXPORT_SYMBOL_GPL(ide_device_put);
-
-static int ide_bus_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
-static int ide_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- ide_drive_t *drive = to_ide_device(dev);
-
- add_uevent_var(env, "MEDIA=%s", ide_media_string(drive));
- add_uevent_var(env, "DRIVENAME=%s", drive->name);
- add_uevent_var(env, "MODALIAS=ide:m-%s", ide_media_string(drive));
- return 0;
-}
-
-static int generic_ide_probe(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- return drv->probe ? drv->probe(drive) : -ENODEV;
-}
-
-static int generic_ide_remove(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- if (drv->remove)
- drv->remove(drive);
-
- return 0;
-}
-
-static void generic_ide_shutdown(struct device *dev)
-{
- ide_drive_t *drive = to_ide_device(dev);
- struct ide_driver *drv = to_ide_driver(dev->driver);
-
- if (dev->driver && drv->shutdown)
- drv->shutdown(drive);
-}
-
-struct bus_type ide_bus_type = {
- .name = "ide",
- .match = ide_bus_match,
- .uevent = ide_uevent,
- .probe = generic_ide_probe,
- .remove = generic_ide_remove,
- .shutdown = generic_ide_shutdown,
- .dev_groups = ide_dev_groups,
- .suspend = generic_ide_suspend,
- .resume = generic_ide_resume,
-};
-
-EXPORT_SYMBOL_GPL(ide_bus_type);
-
-int ide_vlb_clk;
-EXPORT_SYMBOL_GPL(ide_vlb_clk);
-
-module_param_named(vlb_clock, ide_vlb_clk, int, 0);
-MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
-
-int ide_pci_clk;
-EXPORT_SYMBOL_GPL(ide_pci_clk);
-
-module_param_named(pci_clock, ide_pci_clk, int, 0);
-MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
-
-static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp)
-{
- unsigned int a, b, i, j = 1;
- unsigned int *dev_param_mask = (unsigned int *)kp->arg;
-
- /* controller . device (0 or 1) [ : 1 (set) | 0 (clear) ] */
- if (sscanf(s, "%u.%u:%u", &a, &b, &j) != 3 &&
- sscanf(s, "%u.%u", &a, &b) != 2)
- return -EINVAL;
-
- i = a * MAX_DRIVES + b;
-
- if (i >= MAX_HWIFS * MAX_DRIVES || j > 1)
- return -EINVAL;
-
- if (j)
- *dev_param_mask |= (1 << i);
- else
- *dev_param_mask &= ~(1 << i);
-
- return 0;
-}
-
-static const struct kernel_param_ops param_ops_ide_dev_mask = {
- .set = ide_set_dev_param_mask
-};
-
-#define param_check_ide_dev_mask(name, p) param_check_uint(name, p)
-
-static unsigned int ide_nodma;
-
-module_param_named(nodma, ide_nodma, ide_dev_mask, 0);
-MODULE_PARM_DESC(nodma, "disallow DMA for a device");
-
-static unsigned int ide_noflush;
-
-module_param_named(noflush, ide_noflush, ide_dev_mask, 0);
-MODULE_PARM_DESC(noflush, "disable flush requests for a device");
-
-static unsigned int ide_nohpa;
-
-module_param_named(nohpa, ide_nohpa, ide_dev_mask, 0);
-MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
-
-static unsigned int ide_noprobe;
-
-module_param_named(noprobe, ide_noprobe, ide_dev_mask, 0);
-MODULE_PARM_DESC(noprobe, "skip probing for a device");
-
-static unsigned int ide_nowerr;
-
-module_param_named(nowerr, ide_nowerr, ide_dev_mask, 0);
-MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device");
-
-static unsigned int ide_cdroms;
-
-module_param_named(cdrom, ide_cdroms, ide_dev_mask, 0);
-MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
-
-struct chs_geom {
- unsigned int cyl;
- u8 head;
- u8 sect;
-};
-
-static unsigned int ide_disks;
-static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
-
-static int ide_set_disk_chs(const char *str, const struct kernel_param *kp)
-{
- unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1;
-
- /* controller . device (0 or 1) : Cylinders , Heads , Sectors */
- /* controller . device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */
- if (sscanf(str, "%u.%u:%u,%u,%u", &a, &b, &c, &h, &s) != 5 &&
- sscanf(str, "%u.%u:%u", &a, &b, &j) != 3)
- return -EINVAL;
-
- i = a * MAX_DRIVES + b;
-
- if (i >= MAX_HWIFS * MAX_DRIVES || j > 1)
- return -EINVAL;
-
- if (c > INT_MAX || h > 255 || s > 255)
- return -EINVAL;
-
- if (j)
- ide_disks |= (1 << i);
- else
- ide_disks &= ~(1 << i);
-
- ide_disks_chs[i].cyl = c;
- ide_disks_chs[i].head = h;
- ide_disks_chs[i].sect = s;
-
- return 0;
-}
-
-module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
-MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
-
-static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
-{
- int i = drive->hwif->index * MAX_DRIVES + unit;
-
- if (ide_nodma & (1 << i)) {
- printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
- drive->dev_flags |= IDE_DFLAG_NODMA;
- }
- if (ide_noflush & (1 << i)) {
- printk(KERN_INFO "ide: disabling flush requests for %s\n",
- drive->name);
- drive->dev_flags |= IDE_DFLAG_NOFLUSH;
- }
- if (ide_nohpa & (1 << i)) {
- printk(KERN_INFO "ide: disabling Host Protected Area for %s\n",
- drive->name);
- drive->dev_flags |= IDE_DFLAG_NOHPA;
- }
- if (ide_noprobe & (1 << i)) {
- printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
- drive->dev_flags |= IDE_DFLAG_NOPROBE;
- }
- if (ide_nowerr & (1 << i)) {
- printk(KERN_INFO "ide: ignoring the ATA_DF bit for %s\n",
- drive->name);
- drive->bad_wstat = BAD_R_STAT;
- }
- if (ide_cdroms & (1 << i)) {
- printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
- drive->dev_flags |= IDE_DFLAG_PRESENT;
- drive->media = ide_cdrom;
- /* an ATAPI device ignores DRDY */
- drive->ready_stat = 0;
- }
- if (ide_disks & (1 << i)) {
- drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
- drive->head = drive->bios_head = ide_disks_chs[i].head;
- drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
-
- printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
- drive->name,
- drive->cyl, drive->head, drive->sect);
-
- drive->dev_flags |= IDE_DFLAG_FORCED_GEOM | IDE_DFLAG_PRESENT;
- drive->media = ide_disk;
- drive->ready_stat = ATA_DRDY;
- }
-}
-
-static unsigned int ide_ignore_cable;
-
-static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp)
-{
- int i, j = 1;
-
- /* controller (ignore) */
- /* controller : 1 (ignore) | 0 (use) */
- if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
- return -EINVAL;
-
- if (i >= MAX_HWIFS || j < 0 || j > 1)
- return -EINVAL;
-
- if (j)
- ide_ignore_cable |= (1 << i);
- else
- ide_ignore_cable &= ~(1 << i);
-
- return 0;
-}
-
-module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
-MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
-
-void ide_port_apply_params(ide_hwif_t *hwif)
-{
- ide_drive_t *drive;
- int i;
-
- if (ide_ignore_cable & (1 << hwif->index)) {
- printk(KERN_INFO "ide: ignoring cable detection for %s\n",
- hwif->name);
- hwif->cbl = ATA_CBL_PATA40_SHORT;
- }
-
- ide_port_for_each_dev(i, drive, hwif)
- ide_dev_apply_params(drive, i);
-}
-
-/*
- * This gets invoked once during initialization, to set *everything* up
- */
-static int __init ide_init(void)
-{
- int ret;
-
- printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n");
-
- ret = bus_register(&ide_bus_type);
- if (ret < 0) {
- printk(KERN_WARNING "IDE: bus_register error: %d\n", ret);
- return ret;
- }
-
- ide_port_class = class_create(THIS_MODULE, "ide_port");
- if (IS_ERR(ide_port_class)) {
- ret = PTR_ERR(ide_port_class);
- goto out_port_class;
- }
-
- ide_acpi_init();
-
- proc_ide_create();
-
- return 0;
-
-out_port_class:
- bus_unregister(&ide_bus_type);
-
- return ret;
-}
-
-static void __exit ide_exit(void)
-{
- proc_ide_destroy();
-
- class_destroy(ide_port_class);
-
- bus_unregister(&ide_bus_type);
-}
-
-module_init(ide_init);
-module_exit(ide_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
deleted file mode 100644
index 91639fd6c276..000000000000
--- a/drivers/ide/ide_platform.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Platform IDE driver
- *
- * Copyright (C) 2007 MontaVista Software
- *
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/ide.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/ata_platform.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-static void plat_ide_setup_ports(struct ide_hw *hw, void __iomem *base,
- void __iomem *ctrl,
- struct pata_platform_info *pdata, int irq)
-{
- unsigned long port = (unsigned long)base;
- int i;
-
- hw->io_ports.data_addr = port;
-
- port += (1 << pdata->ioport_shift);
- for (i = 1; i <= 7;
- i++, port += (1 << pdata->ioport_shift))
- hw->io_ports_array[i] = port;
-
- hw->io_ports.ctl_addr = (unsigned long)ctrl;
-
- hw->irq = irq;
-}
-
-static const struct ide_port_info platform_ide_port_info = {
- .host_flags = IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static int plat_ide_probe(struct platform_device *pdev)
-{
- struct resource *res_base, *res_alt, *res_irq;
- void __iomem *base, *alt_base;
- struct pata_platform_info *pdata;
- struct ide_host *host;
- int ret = 0, mmio = 0;
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_port_info d = platform_ide_port_info;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- /* get a pointer to the register memory */
- res_base = platform_get_resource(pdev, IORESOURCE_IO, 0);
- res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1);
-
- if (!res_base || !res_alt) {
- res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_base || !res_alt) {
- ret = -ENOMEM;
- goto out;
- }
- mmio = 1;
- }
-
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq) {
- ret = -EINVAL;
- goto out;
- }
-
- if (mmio) {
- base = devm_ioremap(&pdev->dev,
- res_base->start, resource_size(res_base));
- alt_base = devm_ioremap(&pdev->dev,
- res_alt->start, resource_size(res_alt));
- } else {
- base = devm_ioport_map(&pdev->dev,
- res_base->start, resource_size(res_base));
- alt_base = devm_ioport_map(&pdev->dev,
- res_alt->start, resource_size(res_alt));
- }
-
- memset(&hw, 0, sizeof(hw));
- plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
- hw.dev = &pdev->dev;
-
- d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
- if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
- d.irq_flags |= IRQF_SHARED;
-
- if (mmio)
- d.host_flags |= IDE_HFLAG_MMIO;
-
- ret = ide_host_add(&d, hws, 1, &host);
- if (ret)
- goto out;
-
- platform_set_drvdata(pdev, host);
-
- return 0;
-
-out:
- return ret;
-}
-
-static int plat_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = dev_get_drvdata(&pdev->dev);
-
- ide_host_remove(host);
-
- return 0;
-}
-
-static struct platform_driver platform_ide_driver = {
- .driver = {
- .name = "pata_platform",
- },
- .probe = plat_ide_probe,
- .remove = plat_ide_remove,
-};
-
-module_platform_driver(platform_ide_driver);
-
-MODULE_DESCRIPTION("Platform IDE driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pata_platform");
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
deleted file mode 100644
index b6f674ab4fb7..000000000000
--- a/drivers/ide/it8172.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * IT8172 IDE controller support
- *
- * Copyright (C) 2000 MontaVista Software Inc.
- * Copyright (C) 2008 Shane McDonald
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "IT8172"
-
-static void it8172_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 drive_enables;
- u32 drive_timing;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * The highest value of DIOR/DIOW pulse width and recovery time
- * that can be set in the IT8172 is 8 PCI clock cycles. As a result,
- * it cannot be configured for PIO mode 0. This table sets these
- * parameters to the maximum supported by the IT8172.
- */
- static const u8 timings[] = { 0x3f, 0x3c, 0x1b, 0x12, 0x0a };
-
- pci_read_config_word(dev, 0x40, &drive_enables);
- pci_read_config_dword(dev, 0x44, &drive_timing);
-
- /*
- * Enable port 0x44. The IT8172 spec is confused; it calls
- * this register the "Slave IDE Timing Register", but in fact,
- * it controls timing for both master and slave drives.
- */
- drive_enables |= 0x4000;
-
- drive_enables &= drive->dn ? 0xc006 : 0xc060;
- if (drive->media == ide_disk)
- /* enable prefetch */
- drive_enables |= 0x0004 << (drive->dn * 4);
- if (ide_pio_need_iordy(drive, pio))
- /* enable IORDY sample-point */
- drive_enables |= 0x0002 << (drive->dn * 4);
-
- drive_timing &= drive->dn ? 0x00003f00 : 0x000fc000;
- drive_timing |= timings[pio] << (drive->dn * 6 + 8);
-
- pci_write_config_word(dev, 0x40, drive_enables);
- pci_write_config_dword(dev, 0x44, drive_timing);
-}
-
-static void it8172_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int a_speed = 3 << (drive->dn * 4);
- int u_flag = 1 << drive->dn;
- int u_speed = 0;
- u8 reg48, reg4a;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_byte(dev, 0x48, &reg48);
- pci_read_config_byte(dev, 0x4a, &reg4a);
-
- if (speed >= XFER_UDMA_0) {
- u8 udma = speed - XFER_UDMA_0;
- u_speed = udma << (drive->dn * 4);
-
- pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- reg4a &= ~a_speed;
- pci_write_config_byte(dev, 0x4a, reg4a | u_speed);
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
- pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed);
-
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
-
- it8172_set_pio_mode(hwif, drive);
- }
-}
-
-
-static const struct ide_port_ops it8172_port_ops = {
- .set_pio_mode = it8172_set_pio_mode,
- .set_dma_mode = it8172_set_dma_mode,
-};
-
-static const struct ide_port_info it8172_port_info = {
- .name = DRV_NAME,
- .port_ops = &it8172_port_ops,
- .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4 & ~ATA_PIO0,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
-};
-
-static int it8172_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
- return -ENODEV; /* IT8172 is more than an IDE controller */
- return ide_pci_init_one(dev, &it8172_port_info, NULL);
-}
-
-static struct pci_device_id it8172_pci_tbl[] = {
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8172), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, it8172_pci_tbl);
-
-static struct pci_driver it8172_pci_driver = {
- .name = "IT8172_IDE",
- .id_table = it8172_pci_tbl,
- .probe = it8172_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init it8172_ide_init(void)
-{
- return ide_pci_register_driver(&it8172_pci_driver);
-}
-
-static void __exit it8172_ide_exit(void)
-{
- pci_unregister_driver(&it8172_pci_driver);
-}
-
-module_init(it8172_ide_init);
-module_exit(it8172_ide_exit);
-
-MODULE_AUTHOR("Steve Longerbeam");
-MODULE_DESCRIPTION("PCI driver module for ITE 8172 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
deleted file mode 100644
index d0bf4430c437..000000000000
--- a/drivers/ide/it8213.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ITE 8213 IDE driver
- *
- * Copyright (C) 2006 Jack Lee
- * Copyright (C) 2006 Alan Cox
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "it8213"
-
-/**
- * it8213_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Set the interface PIO mode.
- */
-
-static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int is_slave = drive->dn & 1;
- int master_port = 0x40;
- int slave_port = 0x44;
- unsigned long flags;
- u16 master_data;
- u8 slave_data;
- static DEFINE_SPINLOCK(tune_lock);
- int control = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- static const u8 timings[][2] = {
- { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- spin_lock_irqsave(&tune_lock, flags);
- pci_read_config_word(dev, master_port, &master_data);
-
- if (pio > 1)
- control |= 1; /* Programmable timing on */
- if (drive->media != ide_disk)
- control |= 4; /* ATAPI */
- if (ide_pio_need_iordy(drive, pio))
- control |= 2; /* IORDY */
- if (is_slave) {
- master_data |= 0x4000;
- master_data &= ~0x0070;
- if (pio > 1)
- master_data = master_data | (control << 4);
- pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data = slave_data & 0xf0;
- slave_data = slave_data | (timings[pio][0] << 2) | timings[pio][1];
- } else {
- master_data &= ~0x3307;
- if (pio > 1)
- master_data = master_data | control;
- master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
- }
- pci_write_config_word(dev, master_port, master_data);
- if (is_slave)
- pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&tune_lock, flags);
-}
-
-/**
- * it8213_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the ITE chipset for the DMA mode.
- */
-
-static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 maslave = 0x40;
- int a_speed = 3 << (drive->dn * 4);
- int u_flag = 1 << drive->dn;
- int v_flag = 0x01 << drive->dn;
- int w_flag = 0x10 << drive->dn;
- int u_speed = 0;
- u16 reg4042, reg4a;
- u8 reg48, reg54, reg55;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_word(dev, maslave, &reg4042);
- pci_read_config_byte(dev, 0x48, &reg48);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_read_config_byte(dev, 0x54, &reg54);
- pci_read_config_byte(dev, 0x55, &reg55);
-
- if (speed >= XFER_UDMA_0) {
- u8 udma = speed - XFER_UDMA_0;
-
- u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
-
- if (!(reg48 & u_flag))
- pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- if (speed >= XFER_UDMA_5)
- pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
- else
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
-
- if ((reg4a & a_speed) != u_speed)
- pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
- if (speed > XFER_UDMA_2) {
- if (!(reg54 & v_flag))
- pci_write_config_byte(dev, 0x54, reg54 | v_flag);
- } else
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- if (reg48 & u_flag)
- pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- if (reg54 & v_flag)
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- if (reg55 & w_flag)
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
-
- if (speed >= XFER_MW_DMA_0)
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- else
- drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
-
- it8213_set_pio_mode(hwif, drive);
- }
-}
-
-static u8 it8213_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 reg42h = 0;
-
- pci_read_config_byte(dev, 0x42, &reg42h);
-
- return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static const struct ide_port_ops it8213_port_ops = {
- .set_pio_mode = it8213_set_pio_mode,
- .set_dma_mode = it8213_set_dma_mode,
- .cable_detect = it8213_cable_detect,
-};
-
-static const struct ide_port_info it8213_chipset = {
- .name = DRV_NAME,
- .enablebits = { {0x41, 0x80, 0x80} },
- .port_ops = &it8213_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2_ONLY,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA6,
-};
-
-/**
- * it8213_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an ITE8213 controller. As
- * this device follows the standard interfaces we can use the
- * standard helper functions to do almost all the work for us.
- */
-
-static int it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &it8213_chipset, NULL);
-}
-
-static const struct pci_device_id it8213_pci_tbl[] = {
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), 0 },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
-
-static struct pci_driver it8213_pci_driver = {
- .name = "ITE8213_IDE",
- .id_table = it8213_pci_tbl,
- .probe = it8213_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init it8213_ide_init(void)
-{
- return ide_pci_register_driver(&it8213_pci_driver);
-}
-
-static void __exit it8213_ide_exit(void)
-{
- pci_unregister_driver(&it8213_pci_driver);
-}
-
-module_init(it8213_ide_init);
-module_exit(it8213_ide_exit);
-
-MODULE_AUTHOR("Jack Lee, Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
deleted file mode 100644
index 36a64c8ea575..000000000000
--- a/drivers/ide/it821x.c
+++ /dev/null
@@ -1,715 +0,0 @@
-/*
- * Copyright (C) 2004 Red Hat
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- * Based in part on the ITE vendor provided SCSI driver.
- *
- * Documentation:
- * Datasheet is freely available, some other documents under NDA.
- *
- * The ITE8212 isn't exactly a standard IDE controller. It has two
- * modes. In pass through mode it is an IDE controller. In its smart
- * mode it's actually quite a capable hardware raid controller disguised
- * as an IDE controller. Smart mode only understands DMA read/write and
- * identify; none of the fancier commands apply. The IT8211 is identical
- * in other respects but lacks the raid mode.
- *
- * Errata:
- * o Rev 0x10 also requires master/slave hold the same DMA timings and
- * cannot do ATAPI MWDMA.
- * o The identify data for raid volumes lacks CHS info (technically ok)
- * but also fails to set the LBA28 and other bits. We fix these in
- * the IDE probe quirk code.
- * o If you write LBA48-sized I/Os (i.e. > 256 sectors) in smart mode
- * raid then the controller firmware dies
- * o Smart mode without RAID doesn't clear all the necessary identify
- * bits to reduce the command set to the one used
- *
- * This has a few impacts on the driver
- * - In pass through mode we do all the work you would expect
- * - In smart mode the clocking set up is done by the controller generally
- * but we must watch the other limits and filter.
- * - There are a few extra vendor commands that actually talk to the
- * controller but only work in PIO mode with no IRQ.
- *
- * Vendor areas of the identify block in smart mode are used for the
- * timing and policy set up. Each HDD in raid mode also has a serial
- * block on the disk. The hardware extra commands are get/set chip status,
- * rebuild, get rebuild status.
- *
- * In Linux the driver supports pass through mode as if the device was
- * just another IDE controller. If the smart mode is running then
- * volumes are managed by the controller firmware and each IDE "disk"
- * is a raid volume. Even more cute - the controller can do automated
- * hotplug and rebuild.
- *
- * The pass through controller itself is a little demented. It has a
- * flaw in that it has a single set of PIO/MWDMA timings per channel, so
- * non-UDMA devices restrict each other's performance. It also has a
- * single clock source per channel, so mixed UDMA100/133 performance
- * isn't perfect and we have to pick a clock. Thankfully none of this
- * matters in smart mode. ATAPI DMA is not currently supported.
- *
- * It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
- *
- * TODO
- * - ATAPI UDMA is ok but not MWDMA it seems
- * - RAID configuration ioctls
- * - Move to libata once it grows up
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "it821x"
-
-#define QUIRK_VORTEX86 1
-
-struct it821x_dev
-{
- unsigned int smart:1, /* Are we in smart raid mode */
- timing10:1; /* Rev 0x10 */
- u8 clock_mode; /* 0, ATA_50 or ATA_66 */
- u8 want[2][2]; /* Mode/Pri log for master slave */
- /* We need these for switching the clock when DMA goes on/off.
- The high byte is the 66MHz timing */
- u16 pio[2]; /* Cached PIO values */
- u16 mwdma[2]; /* Cached MWDMA values */
- u16 udma[2]; /* Cached UDMA values (per drive) */
- u16 quirks;
-};
-
-#define ATA_66 0
-#define ATA_50 1
-#define ATA_ANY 2
-
-#define UDMA_OFF 0
-#define MWDMA_OFF 0
-
-/*
- * We allow users to force the card into non-raid mode without
- * flashing the alternative BIOS. This is also necessary right now
- * for embedded platforms that cannot run a PC BIOS but are using this
- * device.
- */
-
-static int it8212_noraid;
-
-/**
- * it821x_program - program the PIO/MWDMA registers
- * @drive: drive to tune
- * @timing: timing info
- *
- * Program the PIO/MWDMA timing for this channel according to the
- * current clock.
- */
-
-static void it821x_program(ide_drive_t *drive, u16 timing)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- int channel = hwif->channel;
- u8 conf;
-
- /* Program PIO/MWDMA timing bits */
- if(itdev->clock_mode == ATA_66)
- conf = timing >> 8;
- else
- conf = timing & 0xFF;
-
- pci_write_config_byte(dev, 0x54 + 4 * channel, conf);
-}
-
-/**
- * it821x_program_udma - program the UDMA registers
- * @drive: drive to tune
- * @timing: timing info
- *
- * Program the UDMA timing for this drive according to the
- * current clock.
- */
-
-static void it821x_program_udma(ide_drive_t *drive, u16 timing)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- int channel = hwif->channel;
- u8 unit = drive->dn & 1, conf;
-
- /* Program UDMA timing bits */
- if(itdev->clock_mode == ATA_66)
- conf = timing >> 8;
- else
- conf = timing & 0xFF;
-
- if (itdev->timing10 == 0)
- pci_write_config_byte(dev, 0x56 + 4 * channel + unit, conf);
- else {
- pci_write_config_byte(dev, 0x56 + 4 * channel, conf);
- pci_write_config_byte(dev, 0x56 + 4 * channel + 1, conf);
- }
-}
-
-/**
- * it821x_clock_strategy
- * @drive: drive to set up
- *
- * Select between the 50 and 66MHz base clocks to get the best
- * results for this interface.
- */
-
-static void it821x_clock_strategy(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- int clock, altclock, sel = 0;
- u8 unit = drive->dn & 1, v;
-
- if(itdev->want[0][0] > itdev->want[1][0]) {
- clock = itdev->want[0][1];
- altclock = itdev->want[1][1];
- } else {
- clock = itdev->want[1][1];
- altclock = itdev->want[0][1];
- }
-
- /*
- * if both clocks can be used for the mode with the higher priority,
- * use the clock needed by the mode with the lower priority
- */
- if (clock == ATA_ANY)
- clock = altclock;
-
- /* Nobody cares - keep the same clock */
- if(clock == ATA_ANY)
- return;
- /* No change */
- if(clock == itdev->clock_mode)
- return;
-
- /* Load this into the controller ? */
- if(clock == ATA_66)
- itdev->clock_mode = ATA_66;
- else {
- itdev->clock_mode = ATA_50;
- sel = 1;
- }
-
- pci_read_config_byte(dev, 0x50, &v);
- v &= ~(1 << (1 + hwif->channel));
- v |= sel << (1 + hwif->channel);
- pci_write_config_byte(dev, 0x50, v);
-
- /*
- * Reprogram the UDMA/PIO of the pair drive for the switch
- * MWDMA will be dealt with by the dma switcher
- */
- if(pair && itdev->udma[1-unit] != UDMA_OFF) {
- it821x_program_udma(pair, itdev->udma[1-unit]);
- it821x_program(pair, itdev->pio[1-unit]);
- }
- /*
- * Reprogram the UDMA/PIO of our drive for the switch.
- * MWDMA will be dealt with by the dma switcher
- */
- if(itdev->udma[unit] != UDMA_OFF) {
- it821x_program_udma(drive, itdev->udma[unit]);
- it821x_program(drive, itdev->pio[unit]);
- }
-}
-
-/**
- * it821x_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the host to the desired PIO mode, taking into consideration
- * the maximum PIO mode supported by the other device on the cable.
- */
-
-static void it821x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 unit = drive->dn & 1, set_pio = pio;
-
- /* Spec says 89, the reference driver uses 88 */
- static u16 pio_timings[]= { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
- static u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
-
- /*
- * Compute the best PIO mode we can for a given device. We must
- * pick a speed that does not cause problems with the other device
- * on the cable.
- */
- if (pair) {
- u8 pair_pio = pair->pio_mode - XFER_PIO_0;
- /* trim PIO to the slowest of the master/slave */
- if (pair_pio < set_pio)
- set_pio = pair_pio;
- }
-
- /* We prefer the 66MHz clock for PIO 0-3, don't care for PIO4 */
- itdev->want[unit][1] = pio_want[set_pio];
- itdev->want[unit][0] = 1; /* PIO is lowest priority */
- itdev->pio[unit] = pio_timings[set_pio];
- it821x_clock_strategy(drive);
- it821x_program(drive, itdev->pio[unit]);
-}
-
-/**
- * it821x_tune_mwdma - tune a channel for MWDMA
- * @drive: drive to set up
- * @mode_wanted: the target operating mode
- *
- * Load the timing settings for this device mode into the
- * controller when doing MWDMA in pass through mode. The caller
- * must manage the whole lack of per device MWDMA/PIO timings and
- * the shared MWDMA/PIO timing register.
- */
-
-static void it821x_tune_mwdma(ide_drive_t *drive, u8 mode_wanted)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = (void *)ide_get_hwifdata(hwif);
- u8 unit = drive->dn & 1, channel = hwif->channel, conf;
-
- static u16 dma[] = { 0x8866, 0x3222, 0x3121 };
- static u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
-
- itdev->want[unit][1] = mwdma_want[mode_wanted];
- itdev->want[unit][0] = 2; /* MWDMA is low priority */
- itdev->mwdma[unit] = dma[mode_wanted];
- itdev->udma[unit] = UDMA_OFF;
-
- /* UDMA bits off - Revision 0x10 must do them in pairs */
- pci_read_config_byte(dev, 0x50, &conf);
- if (itdev->timing10)
- conf |= channel ? 0x60: 0x18;
- else
- conf |= 1 << (3 + 2 * channel + unit);
- pci_write_config_byte(dev, 0x50, conf);
-
- it821x_clock_strategy(drive);
- /* FIXME: do we need to program this ? */
- /* it821x_program(drive, itdev->mwdma[unit]); */
-}
-
-/**
- * it821x_tune_udma - tune a channel for UDMA
- * @drive: drive to set up
- * @mode_wanted: the target operating mode
- *
- * Load the timing settings for this device mode into the
- * controller when doing UDMA modes in pass through.
- */
-
-static void it821x_tune_udma(ide_drive_t *drive, u8 mode_wanted)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- u8 unit = drive->dn & 1, channel = hwif->channel, conf;
-
- static u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
- static u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
-
- itdev->want[unit][1] = udma_want[mode_wanted];
- itdev->want[unit][0] = 3; /* UDMA is high priority */
- itdev->mwdma[unit] = MWDMA_OFF;
- itdev->udma[unit] = udma[mode_wanted];
- if(mode_wanted >= 5)
- itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
-
- /* UDMA on. Again revision 0x10 must do the pair */
- pci_read_config_byte(dev, 0x50, &conf);
- if (itdev->timing10)
- conf &= channel ? 0x9F: 0xE7;
- else
- conf &= ~ (1 << (3 + 2 * channel + unit));
- pci_write_config_byte(dev, 0x50, conf);
-
- it821x_clock_strategy(drive);
- it821x_program_udma(drive, itdev->udma[unit]);
-
-}
-
-/**
- * it821x_dma_start - DMA start hook
- * @drive: drive for DMA
- *
- * The IT821x has a single timing register for MWDMA and for PIO
- * operations. As we flip back and forth we have to reload the
- * clock. In addition the rev 0x10 device only works if the same
- * timing value is loaded into the master and slave UDMA clock
- * so we must also reload that.
- *
- * FIXME: we could figure out in advance if we need to do reloads
- */
-
-static void it821x_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- u8 unit = drive->dn & 1;
-
- if(itdev->mwdma[unit] != MWDMA_OFF)
- it821x_program(drive, itdev->mwdma[unit]);
- else if(itdev->udma[unit] != UDMA_OFF && itdev->timing10)
- it821x_program_udma(drive, itdev->udma[unit]);
- ide_dma_start(drive);
-}
-
-/**
- * it821x_dma_end - DMA stop hook
- * @drive: drive for DMA stop
- *
- * The IT821x has a single timing register for MWDMA and for PIO
- * operations. As we flip back and forth we have to reload the
- * clock.
- */
-
-static int it821x_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- int ret = ide_dma_end(drive);
- u8 unit = drive->dn & 1;
-
- if(itdev->mwdma[unit] != MWDMA_OFF)
- it821x_program(drive, itdev->pio[unit]);
- return ret;
-}
-
-/**
- * it821x_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the ITE chipset for the desired DMA mode.
- */
-
-static void it821x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 speed = drive->dma_mode;
-
- /*
- * MWDMA tuning is really hard because our MWDMA and PIO
- * timings are kept in the same place. We handle the switching
- * in the host dma start/stop callbacks.
- */
- if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_6)
- it821x_tune_udma(drive, speed - XFER_UDMA_0);
- else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
- it821x_tune_mwdma(drive, speed - XFER_MW_DMA_0);
-}
-
-/**
- * it821x_cable_detect - cable detection
- * @hwif: interface to check
- *
- * Check for the presence of an ATA66 capable cable on the
- * interface. Problematic as it seems some cards don't have
- * the needed logic onboard.
- */
-
-static u8 it821x_cable_detect(ide_hwif_t *hwif)
-{
- /* The reference driver also only does disk side */
- return ATA_CBL_PATA80;
-}
-
-/**
- * it821x_quirkproc - post init callback
- * @drive: drive
- *
- * This callback is run after the drive has been probed but
- * before anything gets attached. It allows drivers to do any
- * final tuning that is needed, or fixups to work around bugs.
- */
-
-static void it821x_quirkproc(ide_drive_t *drive)
-{
- struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif);
- u16 *id = drive->id;
-
- if (!itdev->smart) {
- /*
- * If we are in pass through mode then not much
- * needs to be done, but we do bother to clear the
- * IRQ mask as we may well be in PIO (e.g. rev 0x10)
- * for now and we know unmasking is safe on this chipset.
- */
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- } else {
- /*
- * Perform fixups on smart mode. We need to "lose" some
- * capabilities the firmware lacks but does not filter, and
- * also patch up some capability bits that it forgets to set
- * in RAID mode.
- */
-
- /* Check for RAID v native */
- if (strstr((char *)&id[ATA_ID_PROD],
- "Integrated Technology Express")) {
- /* In raid mode the ident block is slightly buggy.
- We need to set the bits so that the IDE layer knows
- LBA28, LBA48 and DMA are valid */
- id[ATA_ID_CAPABILITY] |= (3 << 8); /* LBA28, DMA */
- id[ATA_ID_COMMAND_SET_2] |= 0x0400; /* LBA48 valid */
- id[ATA_ID_CFS_ENABLE_2] |= 0x0400; /* LBA48 on */
- /* Reporting logic */
- printk(KERN_INFO "%s: IT8212 %sRAID %d volume",
- drive->name, id[147] ? "Bootable " : "",
- id[ATA_ID_CSFO]);
- if (id[ATA_ID_CSFO] != 1)
- printk(KERN_CONT "(%dK stripe)", id[146]);
- printk(KERN_CONT ".\n");
- } else {
- /* Non RAID volume. Fixups to stop the core code
- doing unsupported things */
- id[ATA_ID_FIELD_VALID] &= 3;
- id[ATA_ID_QUEUE_DEPTH] = 0;
- id[ATA_ID_COMMAND_SET_1] = 0;
- id[ATA_ID_COMMAND_SET_2] &= 0xC400;
- id[ATA_ID_CFSSE] &= 0xC000;
- id[ATA_ID_CFS_ENABLE_1] = 0;
- id[ATA_ID_CFS_ENABLE_2] &= 0xC400;
- id[ATA_ID_CSF_DEFAULT] &= 0xC000;
- id[127] = 0;
- id[ATA_ID_DLF] = 0;
- id[ATA_ID_CSFO] = 0;
- id[ATA_ID_CFA_POWER] = 0;
- printk(KERN_INFO "%s: Performing identify fixups.\n",
- drive->name);
- }
-
- /*
- * Set MWDMA0 mode as enabled/support - just to tell
- * IDE core that DMA is supported (it821x hardware
- * takes care of DMA mode programming).
- */
- if (ata_id_has_dma(id)) {
- id[ATA_ID_MWDMA_MODES] |= 0x0101;
- drive->current_speed = XFER_MW_DMA_0;
- }
- }
-
-}
-
-static const struct ide_dma_ops it821x_pass_through_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = it821x_dma_start,
- .dma_end = it821x_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-/**
- * init_hwif_it821x - set up hwif structs
- * @hwif: interface to set up
- *
- * We do the basic set up of the interface structure. The IT8212
- * requires several custom handlers so we override the default
- * ide DMA handlers appropriately
- */
-
-static void init_hwif_it821x(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct it821x_dev *itdevs = host->host_priv;
- struct it821x_dev *idev = itdevs + hwif->channel;
- u8 conf;
-
- ide_set_hwifdata(hwif, idev);
-
- pci_read_config_byte(dev, 0x50, &conf);
- if (conf & 1) {
- idev->smart = 1;
- hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
- /* Long I/Os, although allowed in LBA48 space, cause the
- onboard firmware to enter the twilight zone */
- hwif->rqsize = 256;
- }
-
- /* Pull the current clocks from 0x50 also */
- if (conf & (1 << (1 + hwif->channel)))
- idev->clock_mode = ATA_50;
- else
- idev->clock_mode = ATA_66;
-
- idev->want[0][1] = ATA_ANY;
- idev->want[1][1] = ATA_ANY;
-
- /*
- * Not in the docs but according to the reference driver
- * this is necessary.
- */
-
- if (dev->revision == 0x10) {
- idev->timing10 = 1;
- hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
- if (idev->smart == 0)
- printk(KERN_WARNING DRV_NAME " %s: revision 0x10, "
- "workarounds activated\n", pci_name(dev));
- }
-
- if (idev->smart == 0) {
- /* MWDMA/PIO clock switching for pass through mode */
- hwif->dma_ops = &it821x_pass_through_dma_ops;
- } else
- hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
-
- if (hwif->dma_base == 0)
- return;
-
- hwif->ultra_mask = ATA_UDMA6;
- hwif->mwdma_mask = ATA_MWDMA2;
-
- /* Vortex86SX quirk: disable Ultra-DMA modes to avoid the BadCRC issue */
- if (idev->quirks & QUIRK_VORTEX86) {
- if (dev->revision == 0x11)
- hwif->ultra_mask = 0;
- }
-}
-
-static void it8212_disable_raid(struct pci_dev *dev)
-{
- /* Reset local CPU, and set BIOS not ready */
- pci_write_config_byte(dev, 0x5E, 0x01);
-
- /* Set to bypass mode, and reset PCI bus */
- pci_write_config_byte(dev, 0x50, 0x00);
- pci_write_config_word(dev, PCI_COMMAND,
- PCI_COMMAND_PARITY | PCI_COMMAND_IO |
- PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
- pci_write_config_word(dev, 0x40, 0xA0F3);
-
- pci_write_config_dword(dev,0x4C, 0x02040204);
- pci_write_config_byte(dev, 0x42, 0x36);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
-}
-
-static int init_chipset_it821x(struct pci_dev *dev)
-{
- u8 conf;
- static char *mode[2] = { "pass through", "smart" };
-
- /* Force the card into bypass mode if so requested */
- if (it8212_noraid) {
- printk(KERN_INFO DRV_NAME " %s: forcing bypass mode\n",
- pci_name(dev));
- it8212_disable_raid(dev);
- }
- pci_read_config_byte(dev, 0x50, &conf);
- printk(KERN_INFO DRV_NAME " %s: controller in %s mode\n",
- pci_name(dev), mode[conf & 1]);
- return 0;
-}
-
-static const struct ide_port_ops it821x_port_ops = {
- /* it821x_set_{pio,dma}_mode() are only used in pass-through mode */
- .set_pio_mode = it821x_set_pio_mode,
- .set_dma_mode = it821x_set_dma_mode,
- .quirkproc = it821x_quirkproc,
- .cable_detect = it821x_cable_detect,
-};
-
-static const struct ide_port_info it821x_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_it821x,
- .init_hwif = init_hwif_it821x,
- .port_ops = &it821x_port_ops,
- .pio_mask = ATA_PIO4,
-};
-
-/**
- * it821x_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an ITE821x controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct it821x_dev *itdevs;
- int rc;
-
- itdevs = kcalloc(2, sizeof(*itdevs), GFP_KERNEL);
- if (itdevs == NULL) {
- printk(KERN_ERR DRV_NAME " %s: out of memory\n", pci_name(dev));
- return -ENOMEM;
- }
-
- itdevs->quirks = id->driver_data;
-
- rc = ide_pci_init_one(dev, &it821x_chipset, itdevs);
- if (rc)
- kfree(itdevs);
-
- return rc;
-}
-
-static void it821x_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct it821x_dev *itdevs = host->host_priv;
-
- ide_pci_remove(dev);
- kfree(itdevs);
-}
-
-static const struct pci_device_id it821x_pci_tbl[] = {
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), 0 },
- { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), 0 },
- { PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), QUIRK_VORTEX86 },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, it821x_pci_tbl);
-
-static struct pci_driver it821x_pci_driver = {
- .name = "ITE821x IDE",
- .id_table = it821x_pci_tbl,
- .probe = it821x_init_one,
- .remove = it821x_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init it821x_ide_init(void)
-{
- return ide_pci_register_driver(&it821x_pci_driver);
-}
-
-static void __exit it821x_ide_exit(void)
-{
- pci_unregister_driver(&it821x_pci_driver);
-}
-
-module_init(it821x_ide_init);
-module_exit(it821x_ide_exit);
-
-module_param_named(noraid, it8212_noraid, int, S_IRUGO);
-MODULE_PARM_DESC(noraid, "Force card into bypass mode");
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for the ITE 821x");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
deleted file mode 100644
index ae6480dcbadf..000000000000
--- a/drivers/ide/jmicron.c
+++ /dev/null
@@ -1,176 +0,0 @@
-
-/*
- * Copyright (C) 2006 Red Hat
- *
- * May be copied or modified under the terms of the GNU General Public License
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "jmicron"
-
-typedef enum {
- PORT_PATA0 = 0,
- PORT_PATA1 = 1,
- PORT_SATA = 2,
-} port_type;
-
-/**
- * jmicron_cable_detect - cable detection
- * @hwif: IDE port
- *
- * Returns the cable type.
- */
-
-static u8 jmicron_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
-
- u32 control;
- u32 control5;
-
- int port = hwif->channel;
- port_type port_map[2];
-
- pci_read_config_dword(pdev, 0x40, &control);
-
- /* There are two basic mappings. One has the two SATA ports merged
- as master/slave and the secondary as PATA, the other has only the
- SATA port mapped */
- if (control & (1 << 23)) {
- port_map[0] = PORT_SATA;
- port_map[1] = PORT_PATA0;
- } else {
- port_map[0] = PORT_SATA;
- port_map[1] = PORT_SATA;
- }
-
- /* The 365/366 may have this bit set to map the second PATA port
- as the internal primary channel */
- pci_read_config_dword(pdev, 0x80, &control5);
- if (control5 & (1<<24))
- port_map[0] = PORT_PATA1;
-
- /* The two ports may then be logically swapped by the firmware */
- if (control & (1 << 22))
- port = port ^ 1;
-
- /*
- * Now we know which physical port we are talking about we can
- * actually do our cable checking etc. Thankfully we don't need
- * to do the plumbing for other cases.
- */
- switch (port_map[port]) {
- case PORT_PATA0:
- if (control & (1 << 3)) /* 40/80 pin primary */
- return ATA_CBL_PATA40;
- return ATA_CBL_PATA80;
- case PORT_PATA1:
- if (control5 & (1 << 19)) /* 40/80 pin secondary */
- return ATA_CBL_PATA40;
- return ATA_CBL_PATA80;
- case PORT_SATA:
- break;
- }
- /* Avoid bogus "control reaches end of non-void function" */
- return ATA_CBL_PATA80;
-}
-
-static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-}
-
-/**
- * jmicron_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * As the JMicron snoops for timings we don't need to do anything here.
- */
-
-static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-}
-
-static const struct ide_port_ops jmicron_port_ops = {
- .set_pio_mode = jmicron_set_pio_mode,
- .set_dma_mode = jmicron_set_dma_mode,
- .cable_detect = jmicron_cable_detect,
-};
-
-static const struct ide_port_info jmicron_chipset = {
- .name = DRV_NAME,
- .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
- .port_ops = &jmicron_port_ops,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
-};
-
-/**
- * jmicron_init_one - pci layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds a Jmicron controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &jmicron_chipset, NULL);
-}
-
-/* All JMB PATA controllers have and will continue to have the same
- * interface. Matching vendor and device class is enough for all
- * current and future controllers if the controller is programmed
- * properly.
- *
- * If libata is configured, jmicron PCI quirk programs the controller
- * into the correct mode. If libata isn't configured, match known
- * device IDs too to maintain backward compatibility.
- */
-static struct pci_device_id jmicron_pci_tbl[] = {
-#if !defined(CONFIG_ATA) && !defined(CONFIG_ATA_MODULE)
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB365) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB366) },
- { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB368) },
-#endif
- { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
- { 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
-
-static struct pci_driver jmicron_pci_driver = {
- .name = "JMicron IDE",
- .id_table = jmicron_pci_tbl,
- .probe = jmicron_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init jmicron_ide_init(void)
-{
- return ide_pci_register_driver(&jmicron_pci_driver);
-}
-
-static void __exit jmicron_ide_exit(void)
-{
- pci_unregister_driver(&jmicron_pci_driver);
-}
-
-module_init(jmicron_ide_init);
-module_exit(jmicron_ide_exit);
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for the JMicron in legacy modes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
deleted file mode 100644
index 8d2bf73bc548..000000000000
--- a/drivers/ide/macide.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Macintosh IDE Driver
- *
- * Copyright (C) 1998 by Michael Schmitz
- *
- * This driver was written based on information obtained from the MacOS IDE
- * driver binary by Mikael Forselius
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <asm/macintosh.h>
-
-#define DRV_NAME "mac_ide"
-
-#define IDE_BASE 0x50F1A000 /* Base address of IDE controller */
-
-/*
- * Generic IDE registers as offsets from the base
- * These match MkLinux so they should be correct.
- */
-
-#define IDE_CONTROL 0x38 /* control/altstatus */
-
-/*
- * Mac-specific registers
- */
-
-/*
- * this register is odd; it doesn't seem to do much and it's
- * not word-aligned like virtually every other hardware register
- * on the Mac...
- */
-
-#define IDE_IFR 0x101 /* (0x101) IDE interrupt flags on Quadra:
- *
- * Bit 0+1: some interrupt flags
- * Bit 2+3: some interrupt enable
- * Bit 4: ??
- * Bit 5: IDE interrupt flag (any hwif)
- * Bit 6: maybe IDE interrupt enable (any hwif) ??
- * Bit 7: Any interrupt condition
- */
-
-volatile unsigned char *ide_ifr = (unsigned char *) (IDE_BASE + IDE_IFR);
-
-int macide_test_irq(ide_hwif_t *hwif)
-{
- if (*ide_ifr & 0x20)
- return 1;
- return 0;
-}
-
-static void macide_clear_irq(ide_drive_t *drive)
-{
- *ide_ifr &= ~0x20;
-}
-
-static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base,
- int irq)
-{
- int i;
-
- memset(hw, 0, sizeof(*hw));
-
- for (i = 0; i < 8; i++)
- hw->io_ports_array[i] = base + i * 4;
-
- hw->io_ports.ctl_addr = base + IDE_CONTROL;
-
- hw->irq = irq;
-}
-
-static const struct ide_port_ops macide_port_ops = {
- .clear_irq = macide_clear_irq,
- .test_irq = macide_test_irq,
-};
-
-static const struct ide_port_info macide_port_info = {
- .port_ops = &macide_port_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
-static const char *mac_ide_name[] =
- { "Quadra", "Powerbook", "Powerbook Baboon" };
-
-/*
- * Probe for a Macintosh IDE interface
- */
-
-static int mac_ide_probe(struct platform_device *pdev)
-{
- struct resource *mem, *irq;
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_port_info d = macide_port_info;
- struct ide_host *host;
- int rc;
-
- if (!MACH_IS_MAC)
- return -ENODEV;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem), DRV_NAME)) {
- dev_err(&pdev->dev, "resources busy\n");
- return -EBUSY;
- }
-
- printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
- mac_ide_name[macintosh_config->ide_type - 1]);
-
- macide_setup_ports(&hw, mem->start, irq->start);
-
- rc = ide_host_add(&d, hws, 1, &host);
- if (rc)
- return rc;
-
- platform_set_drvdata(pdev, host);
- return 0;
-}
-
-static int mac_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-static struct platform_driver mac_ide_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = mac_ide_probe,
- .remove = mac_ide_remove,
-};
-
-module_platform_driver(mac_ide_driver);
-
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
deleted file mode 100644
index 11a672aba6ee..000000000000
--- a/drivers/ide/ns87415.c
+++ /dev/null
@@ -1,350 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1997-1998 Mark Lord <mlord@pobox.com>
- * Copyright (C) 1998 Eddie C. Dost <ecd@skynet.be>
- * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2004 Grant Grundler <grundler at parisc-linux.org>
- *
- * Inspired by an earlier effort from David S. Miller <davem@redhat.com>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "ns87415"
-
-#ifdef CONFIG_SUPERIO
-/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
- * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
- * which use the integrated NS87514 cell for CD-ROM support.
- * i.e. we have to support it for CD-ROM installs.
- * See drivers/parisc/superio.c for more gory details.
- */
-#include <asm/superio.h>
-
-#define SUPERIO_IDE_MAX_RETRIES 25
-
-/* Because of a defect in Super I/O, all reads of the PCI DMA status
- * registers, IDE status register and the IDE select register need to be
- * retried
- */
-static u8 superio_ide_inb (unsigned long port)
-{
- u8 tmp;
- int retries = SUPERIO_IDE_MAX_RETRIES;
-
- /* printk(" [ reading port 0x%x with retry ] ", port); */
-
- do {
- tmp = inb(port);
- if (tmp == 0)
- udelay(50);
- } while (tmp == 0 && retries-- > 0);
-
- return tmp;
-}
-
-static u8 superio_read_status(ide_hwif_t *hwif)
-{
- return superio_ide_inb(hwif->io_ports.status_addr);
-}
-
-static u8 superio_dma_sff_read_status(ide_hwif_t *hwif)
-{
- return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
-}
-
-static void superio_tf_read(ide_drive_t *drive, struct ide_taskfile *tf,
- u8 valid)
-{
- struct ide_io_ports *io_ports = &drive->hwif->io_ports;
-
- if (valid & IDE_VALID_ERROR)
- tf->error = inb(io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- tf->nsect = inb(io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- tf->lbal = inb(io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- tf->lbam = inb(io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- tf->lbah = inb(io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- tf->device = superio_ide_inb(io_ports->device_addr);
-}
-
-static void ns87415_dev_select(ide_drive_t *drive);
-
-static const struct ide_tp_ops superio_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = superio_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ns87415_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = superio_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static void superio_init_iops(struct hwif_s *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u32 dma_stat;
- u8 port = hwif->channel, tmp;
-
- dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
-
- /* Clear error/interrupt, enable dma */
- tmp = superio_ide_inb(dma_stat);
- outb(tmp | 0x66, dma_stat);
-}
-#else
-#define superio_dma_sff_read_status ide_dma_sff_read_status
-#endif
-
-static unsigned int ns87415_count = 0, ns87415_control[MAX_HWIFS] = { 0 };
-
-/*
- * This routine enables or disables (according to IDE_DFLAG_PRESENT)
- * the IRQ associated with the port,
- * and selects either PIO or DMA handshaking for the next I/O operation.
- */
-static void ns87415_prepare_drive (ide_drive_t *drive, unsigned int use_dma)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int bit, other, new, *old = (unsigned int *) hwif->select_data;
- unsigned long flags;
-
- local_irq_save(flags);
- new = *old;
-
- /* Adjust IRQ enable bit */
- bit = 1 << (8 + hwif->channel);
-
- if (drive->dev_flags & IDE_DFLAG_PRESENT)
- new &= ~bit;
- else
- new |= bit;
-
- /* Select PIO or DMA, DMA may only be selected for one drive/channel. */
- bit = 1 << (20 + (drive->dn & 1) + (hwif->channel << 1));
- other = 1 << (20 + (1 - (drive->dn & 1)) + (hwif->channel << 1));
- new = use_dma ? ((new & ~other) | bit) : (new & ~bit);
-
- if (new != *old) {
- unsigned char stat;
-
- /*
- * Don't change DMA engine settings while Write Buffers
- * are busy.
- */
- (void) pci_read_config_byte(dev, 0x43, &stat);
- while (stat & 0x03) {
- udelay(1);
- (void) pci_read_config_byte(dev, 0x43, &stat);
- }
-
- *old = new;
- (void) pci_write_config_dword(dev, 0x40, new);
-
- /*
- * And let things settle...
- */
- udelay(10);
- }
-
- local_irq_restore(flags);
-}
-
-static void ns87415_dev_select(ide_drive_t *drive)
-{
- ns87415_prepare_drive(drive,
- !!(drive->dev_flags & IDE_DFLAG_USING_DMA));
-
- outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
-}
-
-static void ns87415_dma_start(ide_drive_t *drive)
-{
- ns87415_prepare_drive(drive, 1);
- ide_dma_start(drive);
-}
-
-static int ns87415_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat = 0, dma_cmd = 0;
-
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
- /* get DMA command mode */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- /* stop DMA */
- outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- /* from ERRATA: clear the INTR & ERROR bits */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
-
- ns87415_prepare_drive(drive, 0);
-
- /* verify good DMA status */
- return (dma_stat & 7) != 4;
-}
-
-static void init_hwif_ns87415 (ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int ctrl, using_inta;
- u8 progif;
-#ifdef __sparc_v9__
- int timeout;
- u8 stat;
-#endif
-
- /*
- * We cannot probe for IRQ: both ports share common IRQ on INTA.
- * Also, leave IRQ masked during drive probing, to prevent infinite
- * interrupts from a potentially floating INTA.
- *
- * IRQs get unmasked in dev_select() when drive is first used.
- */
- (void) pci_read_config_dword(dev, 0x40, &ctrl);
- (void) pci_read_config_byte(dev, 0x09, &progif);
- /* is irq in "native" mode? */
- using_inta = progif & (1 << (hwif->channel << 1));
- if (!using_inta)
- using_inta = ctrl & (1 << (4 + hwif->channel));
- if (hwif->mate) {
- hwif->select_data = hwif->mate->select_data;
- } else {
- hwif->select_data = (unsigned long)
- &ns87415_control[ns87415_count++];
- ctrl |= (1 << 8) | (1 << 9); /* mask both IRQs */
- if (using_inta)
- ctrl &= ~(1 << 6); /* unmask INTA */
- *((unsigned int *)hwif->select_data) = ctrl;
- (void) pci_write_config_dword(dev, 0x40, ctrl);
-
- /*
- * Set prefetch size to 512 bytes for both ports,
- * but don't turn on/off prefetching here.
- */
- pci_write_config_byte(dev, 0x55, 0xee);
-
-#ifdef __sparc_v9__
- /*
- * XXX: Reset the device; if we don't, it will not respond to
- * dev_select() properly during first ide_probe_port().
- */
- timeout = 10000;
- outb(12, hwif->io_ports.ctl_addr);
- udelay(10);
- outb(8, hwif->io_ports.ctl_addr);
- do {
- udelay(50);
- stat = hwif->tp_ops->read_status(hwif);
- if (stat == 0xff)
- break;
- } while ((stat & ATA_BUSY) && --timeout);
-#endif
- }
-
- if (!using_inta)
- hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
-
- if (!hwif->dma_base)
- return;
-
- outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
-}
-
-static const struct ide_tp_ops ns87415_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ns87415_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_dma_ops ns87415_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ns87415_dma_start,
- .dma_end = ns87415_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = superio_dma_sff_read_status,
-};
-
-static const struct ide_port_info ns87415_chipset = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_ns87415,
- .tp_ops = &ns87415_tp_ops,
- .dma_ops = &ns87415_dma_ops,
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_NO_ATAPI_DMA,
-};
-
-static int ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = ns87415_chipset;
-
-#ifdef CONFIG_SUPERIO
- if (PCI_SLOT(dev->devfn) == 0xE) {
- /* Built-in - assume it's under superio. */
- d.init_iops = superio_init_iops;
- d.tp_ops = &superio_tp_ops;
- }
-#endif
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id ns87415_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
-
-static struct pci_driver ns87415_pci_driver = {
- .name = "NS87415_IDE",
- .id_table = ns87415_pci_tbl,
- .probe = ns87415_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init ns87415_ide_init(void)
-{
- return ide_pci_register_driver(&ns87415_pci_driver);
-}
-
-static void __exit ns87415_ide_exit(void)
-{
- pci_unregister_driver(&ns87415_pci_driver);
-}
-
-module_init(ns87415_ide_init);
-module_exit(ns87415_ide_exit);
-
-MODULE_AUTHOR("Mark Lord, Eddie Dost, Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for NS87415 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
deleted file mode 100644
index c374f82333c6..000000000000
--- a/drivers/ide/opti621.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
- */
-
-/*
- * Authors:
- * Jaromir Koutek <miri@punknet.cz>,
- * Jan Harkes <jaharkes@cwi.nl>,
- * Mark Lord <mlord@pobox.com>
- * Some parts of code are from ali14xx.c and from rz1000.c.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "opti621"
-
-#define READ_REG 0 /* index of Read cycle timing register */
-#define WRITE_REG 1 /* index of Write cycle timing register */
-#define CNTRL_REG 3 /* index of Control register */
-#define STRAP_REG 5 /* index of Strap register */
-#define MISC_REG 6 /* index of Miscellaneous register */
-
-static int reg_base;
-
-static DEFINE_SPINLOCK(opti621_lock);
-
-/* Write value to register reg, base of register
- * is at reg_base (0x1f0 primary, 0x170 secondary,
- * if not changed by PCI configuration).
- * This is taken from the setupvic.exe program.
- */
-static void write_reg(u8 value, int reg)
-{
- inw(reg_base + 1);
- inw(reg_base + 1);
- outb(3, reg_base + 2);
- outb(value, reg_base + reg);
- outb(0x83, reg_base + 2);
-}
-
-/* Read value from register reg, base of register
- * is at reg_base (0x1f0 primary, 0x170 secondary,
- * if not changed by PCI configuration).
- * This is taken from the setupvic.exe program.
- */
-static u8 read_reg(int reg)
-{
- u8 ret = 0;
-
- inw(reg_base + 1);
- inw(reg_base + 1);
- outb(3, reg_base + 2);
- ret = inb(reg_base + reg);
- outb(0x83, reg_base + 2);
-
- return ret;
-}
-
-static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_drive_t *pair = ide_get_pair_dev(drive);
- unsigned long flags;
- unsigned long mode = drive->pio_mode, pair_mode;
- const u8 pio = mode - XFER_PIO_0;
- u8 tim, misc, addr_pio = pio, clk;
-
- /* DRDY is default 2 (by OPTi Databook) */
- static const u8 addr_timings[2][5] = {
- { 0x20, 0x10, 0x00, 0x00, 0x00 }, /* 33 MHz */
- { 0x10, 0x10, 0x00, 0x00, 0x00 }, /* 25 MHz */
- };
- static const u8 data_rec_timings[2][5] = {
- { 0x5b, 0x45, 0x32, 0x21, 0x20 }, /* 33 MHz */
- { 0x48, 0x34, 0x21, 0x10, 0x10 } /* 25 MHz */
- };
-
- ide_set_drivedata(drive, (void *)mode);
-
- if (pair) {
- pair_mode = (unsigned long)ide_get_drivedata(pair);
- if (pair_mode && pair_mode < mode)
- addr_pio = pair_mode - XFER_PIO_0;
- }
-
- spin_lock_irqsave(&opti621_lock, flags);
-
- reg_base = hwif->io_ports.data_addr;
-
- /* allow Register-B */
- outb(0xc0, reg_base + CNTRL_REG);
- /* hmm, setupvic.exe does this ;-) */
- outb(0xff, reg_base + 5);
- /* if it reads 0xff, the adapter may not exist */
- (void)inb(reg_base + CNTRL_REG);
- /* if it reads 0xc0, no interface may exist */
- read_reg(CNTRL_REG);
-
- /* check CLK speed */
- clk = read_reg(STRAP_REG) & 1;
-
- printk(KERN_INFO "%s: CLK = %d MHz\n", hwif->name, clk ? 25 : 33);
-
- tim = data_rec_timings[clk][pio];
- misc = addr_timings[clk][addr_pio];
-
- /* select Index-0/1 for Register-A/B */
- write_reg(drive->dn & 1, MISC_REG);
- /* set read cycle timings */
- write_reg(tim, READ_REG);
- /* set write cycle timings */
- write_reg(tim, WRITE_REG);
-
- /* use Register-A for drive 0 */
- /* use Register-B for drive 1 */
- write_reg(0x85, CNTRL_REG);
-
- /* set address setup, DRDY timings, */
- /* and read prefetch for both drives */
- write_reg(misc, MISC_REG);
-
- spin_unlock_irqrestore(&opti621_lock, flags);
-}
-
-static const struct ide_port_ops opti621_port_ops = {
- .set_pio_mode = opti621_set_pio_mode,
-};
-
-static const struct ide_port_info opti621_chipset = {
- .name = DRV_NAME,
- .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
- .port_ops = &opti621_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-static int opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &opti621_chipset, NULL);
-}
-
-static const struct pci_device_id opti621_pci_tbl[] = {
- { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
- { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, opti621_pci_tbl);
-
-static struct pci_driver opti621_pci_driver = {
- .name = "Opti621_IDE",
- .id_table = opti621_pci_tbl,
- .probe = opti621_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init opti621_ide_init(void)
-{
- return ide_pci_register_driver(&opti621_pci_driver);
-}
-
-static void __exit opti621_ide_exit(void)
-{
- pci_unregister_driver(&opti621_pci_driver);
-}
-
-module_init(opti621_ide_init);
-module_exit(opti621_ide_exit);
-
-MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for Opti621 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
deleted file mode 100644
index d1fe4c13e35c..000000000000
--- a/drivers/ide/palm_bk3710.c
+++ /dev/null
@@ -1,387 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Palmchip bk3710 IDE controller
- *
- * Copyright (C) 2006 Texas Instruments.
- * Copyright (C) 2007 MontaVista Software, Inc., <source@mvista.com>
- *
- * ----------------------------------------------------------------------------
- *
- * ----------------------------------------------------------------------------
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/ide.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-/* Offset of the primary interface registers */
-#define IDE_PALM_ATA_PRI_REG_OFFSET 0x1F0
-
-/* Primary Control Offset */
-#define IDE_PALM_ATA_PRI_CTL_OFFSET 0x3F6
-
-#define BK3710_BMICP 0x00
-#define BK3710_BMISP 0x02
-#define BK3710_BMIDTP 0x04
-#define BK3710_IDETIMP 0x40
-#define BK3710_IDESTATUS 0x47
-#define BK3710_UDMACTL 0x48
-#define BK3710_MISCCTL 0x50
-#define BK3710_REGSTB 0x54
-#define BK3710_REGRCVR 0x58
-#define BK3710_DATSTB 0x5C
-#define BK3710_DATRCVR 0x60
-#define BK3710_DMASTB 0x64
-#define BK3710_DMARCVR 0x68
-#define BK3710_UDMASTB 0x6C
-#define BK3710_UDMATRP 0x70
-#define BK3710_UDMAENV 0x74
-#define BK3710_IORDYTMP 0x78
-
-static unsigned ideclk_period; /* in nanoseconds */
-
-struct palm_bk3710_udmatiming {
- unsigned int rptime; /* tRP -- Ready to pause time (nsec) */
- unsigned int cycletime; /* tCYCTYP2/2 -- avg Cycle Time (nsec) */
- /* tENV is always a minimum of 20 nsec */
-};
-
-static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
- { 160, 240 / 2 }, /* UDMA Mode 0 */
- { 125, 160 / 2 }, /* UDMA Mode 1 */
- { 100, 120 / 2 }, /* UDMA Mode 2 */
- { 100, 90 / 2 }, /* UDMA Mode 3 */
- { 100, 60 / 2 }, /* UDMA Mode 4 */
- { 85, 40 / 2 }, /* UDMA Mode 5 */
-};
-
-static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
- unsigned int mode)
-{
- u8 tenv, trp, t0;
- u32 val32;
- u16 val16;
-
- /* DMA Data Setup */
- t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
- ideclk_period) - 1;
- tenv = DIV_ROUND_UP(20, ideclk_period) - 1;
- trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
- ideclk_period) - 1;
-
- /* udmastb Ultra DMA Access Strobe Width */
- val32 = readl(base + BK3710_UDMASTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (t0 << (dev ? 8 : 0));
- writel(val32, base + BK3710_UDMASTB);
-
- /* udmatrp Ultra DMA Ready to Pause Time */
- val32 = readl(base + BK3710_UDMATRP) & (0xFF << (dev ? 0 : 8));
- val32 |= (trp << (dev ? 8 : 0));
- writel(val32, base + BK3710_UDMATRP);
-
- /* udmaenv Ultra DMA envelope Time */
- val32 = readl(base + BK3710_UDMAENV) & (0xFF << (dev ? 0 : 8));
- val32 |= (tenv << (dev ? 8 : 0));
- writel(val32, base + BK3710_UDMAENV);
-
- /* Enable UDMA for Device */
- val16 = readw(base + BK3710_UDMACTL) | (1 << dev);
- writew(val16, base + BK3710_UDMACTL);
-}
-
-static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
- unsigned short min_cycle,
- unsigned int mode)
-{
- u8 td, tkw, t0;
- u32 val32;
- u16 val16;
- struct ide_timing *t;
- int cycletime;
-
- t = ide_timing_find_mode(mode);
- cycletime = max_t(int, t->cycle, min_cycle);
-
- /* DMA Data Setup */
- t0 = DIV_ROUND_UP(cycletime, ideclk_period);
- td = DIV_ROUND_UP(t->active, ideclk_period);
- tkw = t0 - td - 1;
- td -= 1;
-
- val32 = readl(base + BK3710_DMASTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (td << (dev ? 8 : 0));
- writel(val32, base + BK3710_DMASTB);
-
- val32 = readl(base + BK3710_DMARCVR) & (0xFF << (dev ? 0 : 8));
- val32 |= (tkw << (dev ? 8 : 0));
- writel(val32, base + BK3710_DMARCVR);
-
- /* Disable UDMA for Device */
- val16 = readw(base + BK3710_UDMACTL) & ~(1 << dev);
- writew(val16, base + BK3710_UDMACTL);
-}
-
-static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
- unsigned int dev, unsigned int cycletime,
- unsigned int mode)
-{
- u8 t2, t2i, t0;
- u32 val32;
- struct ide_timing *t;
-
- t = ide_timing_find_mode(XFER_PIO_0 + mode);
-
- /* PIO Data Setup */
- t0 = DIV_ROUND_UP(cycletime, ideclk_period);
- t2 = DIV_ROUND_UP(t->active, ideclk_period);
-
- t2i = t0 - t2 - 1;
- t2 -= 1;
-
- val32 = readl(base + BK3710_DATSTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2 << (dev ? 8 : 0));
- writel(val32, base + BK3710_DATSTB);
-
- val32 = readl(base + BK3710_DATRCVR) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2i << (dev ? 8 : 0));
- writel(val32, base + BK3710_DATRCVR);
-
- if (mate) {
- u8 mode2 = mate->pio_mode - XFER_PIO_0;
-
- if (mode2 < mode)
- mode = mode2;
- }
-
- /* TASKFILE Setup */
- t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
- t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
-
- t2i = t0 - t2 - 1;
- t2 -= 1;
-
- val32 = readl(base + BK3710_REGSTB) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2 << (dev ? 8 : 0));
- writel(val32, base + BK3710_REGSTB);
-
- val32 = readl(base + BK3710_REGRCVR) & (0xFF << (dev ? 0 : 8));
- val32 |= (t2i << (dev ? 8 : 0));
- writel(val32, base + BK3710_REGRCVR);
-}
-
-static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int is_slave = drive->dn & 1;
- void __iomem *base = (void __iomem *)hwif->dma_base;
- const u8 xferspeed = drive->dma_mode;
-
- if (xferspeed >= XFER_UDMA_0) {
- palm_bk3710_setudmamode(base, is_slave,
- xferspeed - XFER_UDMA_0);
- } else {
- palm_bk3710_setdmamode(base, is_slave,
- drive->id[ATA_ID_EIDE_DMA_MIN],
- xferspeed);
- }
-}
-
-static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned int cycle_time;
- int is_slave = drive->dn & 1;
- ide_drive_t *mate;
- void __iomem *base = (void __iomem *)hwif->dma_base;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * Obtain the drive PIO data for tuning the Palm Chip registers
- */
- cycle_time = ide_pio_cycle_time(drive, pio);
- mate = ide_get_pair_dev(drive);
- palm_bk3710_setpiomode(base, mate, is_slave, cycle_time, pio);
-}
-
-static void palm_bk3710_chipinit(void __iomem *base)
-{
- /*
- * REVISIT: the ATA reset signal needs to be managed through a
- * GPIO, which means it should come from platform_data. Until
- * we get and use such information, we have to trust that things
- * have been reset before we get here.
- */
-
- /*
- * Program the IDETIMP Register Value based on the following assumptions
- *
- * (ATA_IDETIMP_IDEEN , ENABLE ) |
- * (ATA_IDETIMP_PREPOST1 , DISABLE) |
- * (ATA_IDETIMP_PREPOST0 , DISABLE) |
- *
- * DM6446 silicon rev 2.1 and earlier have no observed net benefit
- * from enabling prefetch/postwrite.
- */
- writew(BIT(15), base + BK3710_IDETIMP);
-
- /*
- * UDMACTL Ultra-ATA DMA Control
- * (ATA_UDMACTL_UDMAP1 , 0 ) |
- * (ATA_UDMACTL_UDMAP0 , 0 )
- *
- */
- writew(0, base + BK3710_UDMACTL);
-
- /*
- * MISCCTL Miscellaneous Control Register
- * (ATA_MISCCTL_HWNHLD1P , 1 cycle)
- * (ATA_MISCCTL_HWNHLD0P , 1 cycle)
- * (ATA_MISCCTL_TIMORIDE , 1)
- */
- writel(0x001, base + BK3710_MISCCTL);
-
- /*
- * IORDYTMP IORDY Timer for Primary Register
- * (ATA_IORDYTMP_IORDYTMP , 0xffff )
- */
- writel(0xFFFF, base + BK3710_IORDYTMP);
-
- /*
- * Configure BMISP Register
- * (ATA_BMISP_DMAEN1 , DISABLE ) |
- * (ATA_BMISP_DMAEN0 , DISABLE ) |
- * (ATA_BMISP_IORDYINT , CLEAR) |
- * (ATA_BMISP_INTRSTAT , CLEAR) |
- * (ATA_BMISP_DMAERROR , CLEAR)
- */
- writew(0, base + BK3710_BMISP);
-
- palm_bk3710_setpiomode(base, NULL, 0, 600, 0);
- palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
-}
-
-static u8 palm_bk3710_cable_detect(ide_hwif_t *hwif)
-{
- return ATA_CBL_PATA80;
-}
-
-static int palm_bk3710_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
-
- hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
-
- return 0;
-}
-
-static const struct ide_port_ops palm_bk3710_ports_ops = {
- .set_pio_mode = palm_bk3710_set_pio_mode,
- .set_dma_mode = palm_bk3710_set_dma_mode,
- .cable_detect = palm_bk3710_cable_detect,
-};
-
-static struct ide_port_info palm_bk3710_port_info __initdata = {
- .init_dma = palm_bk3710_init_dma,
- .port_ops = &palm_bk3710_ports_ops,
- .dma_ops = &sff_dma_ops,
- .host_flags = IDE_HFLAG_MMIO,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .chipset = ide_palm3710,
-};
-
-static int __init palm_bk3710_probe(struct platform_device *pdev)
-{
- struct clk *clk;
- struct resource *mem, *irq;
- void __iomem *base;
- unsigned long rate, mem_size;
- int i, rc;
- struct ide_hw hw, *hws[] = { &hw };
-
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return -ENODEV;
-
- clk_enable(clk);
- rate = clk_get_rate(clk);
- if (!rate)
- return -EINVAL;
-
- /* NOTE: round *down* to meet minimum timings; we count in clocks */
- ideclk_period = 1000000000UL / rate;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL) {
- printk(KERN_ERR "failed to get memory region resource\n");
- return -ENODEV;
- }
-
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
- printk(KERN_ERR "failed to get IRQ resource\n");
- return -ENODEV;
- }
-
- mem_size = resource_size(mem);
- if (request_mem_region(mem->start, mem_size, "palm_bk3710") == NULL) {
- printk(KERN_ERR "failed to request memory region\n");
- return -EBUSY;
- }
-
- base = ioremap(mem->start, mem_size);
- if (!base) {
- printk(KERN_ERR "failed to map IO memory\n");
- release_mem_region(mem->start, mem_size);
- return -ENOMEM;
- }
-
- /* Configure the Palm Chip controller */
- palm_bk3710_chipinit(base);
-
- memset(&hw, 0, sizeof(hw));
- for (i = 0; i < IDE_NR_PORTS - 2; i++)
- hw.io_ports_array[i] = (unsigned long)
- (base + IDE_PALM_ATA_PRI_REG_OFFSET + i);
- hw.io_ports.ctl_addr = (unsigned long)
- (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
- hw.irq = irq->start;
- hw.dev = &pdev->dev;
-
- palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
- ATA_UDMA5;
-
- /* Register the IDE interface with Linux */
- rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL);
- if (rc)
- goto out;
-
- return 0;
-out:
- printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
- return rc;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:palm_bk3710");
-
-static struct platform_driver platform_bk_driver = {
- .driver = {
- .name = "palm_bk3710",
- },
-};
-
-static int __init palm_bk3710_init(void)
-{
- return platform_driver_probe(&platform_bk_driver, palm_bk3710_probe);
-}
-
-module_init(palm_bk3710_init);
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
deleted file mode 100644
index 4fcafb9121e0..000000000000
--- a/drivers/ide/pdc202xx_new.c
+++ /dev/null
@@ -1,557 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Promise TX2/TX4/TX2000/133 IDE driver
- *
- * Split from:
- * linux/drivers/ide/pdc202xx.c Version 0.35 Mar. 30, 2002
- * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2005-2007 MontaVista Software, Inc.
- * Portions Copyright (C) 1999 Promise Technology, Inc.
- * Author: Frank Tiernan (frankt@promise.com)
- * Released under terms of General Public License
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/ktime.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
-
-#define DRV_NAME "pdc202xx_new"
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
-#else
-#define DBG(fmt, args...)
-#endif
-
-static u8 max_dma_rate(struct pci_dev *pdev)
-{
- u8 mode;
-
- switch(pdev->device) {
- case PCI_DEVICE_ID_PROMISE_20277:
- case PCI_DEVICE_ID_PROMISE_20276:
- case PCI_DEVICE_ID_PROMISE_20275:
- case PCI_DEVICE_ID_PROMISE_20271:
- case PCI_DEVICE_ID_PROMISE_20269:
- mode = 4;
- break;
- case PCI_DEVICE_ID_PROMISE_20270:
- case PCI_DEVICE_ID_PROMISE_20268:
- mode = 3;
- break;
- default:
- return 0;
- }
-
- return mode;
-}
-
-/**
- * get_indexed_reg - Get indexed register
- * @hwif: for the port address
- * @index: index of the indexed register
- */
-static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
-{
- u8 value;
-
- outb(index, hwif->dma_base + 1);
- value = inb(hwif->dma_base + 3);
-
- DBG("index[%02X] value[%02X]\n", index, value);
- return value;
-}
-
-/**
- * set_indexed_reg - Set indexed register
- * @hwif: for the port address
- * @index: index of the indexed register
- */
-static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
-{
- outb(index, hwif->dma_base + 1);
- outb(value, hwif->dma_base + 3);
- DBG("index[%02X] value[%02X]\n", index, value);
-}
-
-/*
- * ATA Timing Tables based on 133 MHz PLL output clock.
- *
- * If the PLL outputs 100 MHz clock, the ASIC hardware will set
- * the timing registers automatically when "set features" command is
- * issued to the device. However, if the PLL output clock is 133 MHz,
- * the following tables must be used.
- */
-static struct pio_timing {
- u8 reg0c, reg0d, reg13;
-} pio_timings [] = {
- { 0xfb, 0x2b, 0xac }, /* PIO mode 0, IORDY off, Prefetch off */
- { 0x46, 0x29, 0xa4 }, /* PIO mode 1, IORDY off, Prefetch off */
- { 0x23, 0x26, 0x64 }, /* PIO mode 2, IORDY off, Prefetch off */
- { 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
- { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
-};
-
-static struct mwdma_timing {
- u8 reg0e, reg0f;
-} mwdma_timings [] = {
- { 0xdf, 0x5f }, /* MWDMA mode 0 */
- { 0x6b, 0x27 }, /* MWDMA mode 1 */
- { 0x69, 0x25 }, /* MWDMA mode 2 */
-};
-
-static struct udma_timing {
- u8 reg10, reg11, reg12;
-} udma_timings [] = {
- { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
- { 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
- { 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
- { 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
- { 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
- { 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
- { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
-};
-
-static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
- const u8 speed = drive->dma_mode;
-
- /*
- * IDE core issues SETFEATURES_XFER to the drive first (thanks to
- * IDE_HFLAG_POST_SET_MODE in ->host_flags). PDC202xx hardware will
- * automatically set the timing registers based on 100 MHz PLL output.
- *
- * As we set up the PLL to output 133 MHz for UltraDMA/133 capable
- * chips, we must override the default register settings...
- */
- if (max_dma_rate(dev) == 4) {
- u8 mode = speed & 0x07;
-
- if (speed >= XFER_UDMA_0) {
- set_indexed_reg(hwif, 0x10 + adj,
- udma_timings[mode].reg10);
- set_indexed_reg(hwif, 0x11 + adj,
- udma_timings[mode].reg11);
- set_indexed_reg(hwif, 0x12 + adj,
- udma_timings[mode].reg12);
- } else {
- set_indexed_reg(hwif, 0x0e + adj,
- mwdma_timings[mode].reg0e);
- set_indexed_reg(hwif, 0x0f + adj,
- mwdma_timings[mode].reg0f);
- }
- } else if (speed == XFER_UDMA_2) {
- /* Set tHOLD bit to 0 if using UDMA mode 2 */
- u8 tmp = get_indexed_reg(hwif, 0x10 + adj);
-
- set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f);
- }
-}
-
-static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- if (max_dma_rate(dev) == 4) {
- set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
- set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d);
- set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13);
- }
-}
-
-static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
-{
- if (get_indexed_reg(hwif, 0x0b) & 0x04)
- return ATA_CBL_PATA40;
- else
- return ATA_CBL_PATA80;
-}
-
-static void pdcnew_reset(ide_drive_t *drive)
-{
- /*
- * The actual reset was deleted here because it is redundant with the caller.
- */
- printk(KERN_WARNING "pdc202xx_new: %s channel reset.\n",
- drive->hwif->channel ? "Secondary" : "Primary");
-}
-
-/**
- * read_counter - Read the byte count registers
- * @dma_base: for the port address
- */
-static long read_counter(u32 dma_base)
-{
- u32 pri_dma_base = dma_base, sec_dma_base = dma_base + 0x08;
- u8 cnt0, cnt1, cnt2, cnt3;
- long count = 0, last;
- int retry = 3;
-
- do {
- last = count;
-
- /* Read the current count */
- outb(0x20, pri_dma_base + 0x01);
- cnt0 = inb(pri_dma_base + 0x03);
- outb(0x21, pri_dma_base + 0x01);
- cnt1 = inb(pri_dma_base + 0x03);
- outb(0x20, sec_dma_base + 0x01);
- cnt2 = inb(sec_dma_base + 0x03);
- outb(0x21, sec_dma_base + 0x01);
- cnt3 = inb(sec_dma_base + 0x03);
-
- count = (cnt3 << 23) | (cnt2 << 15) | (cnt1 << 8) | cnt0;
-
- /*
- * The 30-bit decrementing counter is read in 4 pieces.
- * An incorrect value may be read while the most significant bytes
- * are changing...
- */
- } while (retry-- && (((last ^ count) & 0x3fff8000) || last < count));
-
- DBG("cnt0[%02X] cnt1[%02X] cnt2[%02X] cnt3[%02X]\n",
- cnt0, cnt1, cnt2, cnt3);
-
- return count;
-}
-
-/**
- * detect_pll_input_clock - Detect the PLL input clock in Hz.
- * @dma_base: for the port address
- * E.g. 16949000 on 33 MHz PCI bus, i.e. half of the PCI clock.
- */
-static long detect_pll_input_clock(unsigned long dma_base)
-{
- ktime_t start_time, end_time;
- long start_count, end_count;
- long pll_input, usec_elapsed;
- u8 scr1;
-
- start_count = read_counter(dma_base);
- start_time = ktime_get();
-
- /* Start the test mode */
- outb(0x01, dma_base + 0x01);
- scr1 = inb(dma_base + 0x03);
- DBG("scr1[%02X]\n", scr1);
- outb(scr1 | 0x40, dma_base + 0x03);
-
- /* Let the counter run for 10 ms. */
- mdelay(10);
-
- end_count = read_counter(dma_base);
- end_time = ktime_get();
-
- /* Stop the test mode */
- outb(0x01, dma_base + 0x01);
- scr1 = inb(dma_base + 0x03);
- DBG("scr1[%02X]\n", scr1);
- outb(scr1 & ~0x40, dma_base + 0x03);
-
- /*
- * Calculate the input clock in Hz
- * (the clock counter is 30 bit wide and counts down)
- */
- usec_elapsed = ktime_us_delta(end_time, start_time);
- pll_input = ((start_count - end_count) & 0x3fffffff) / 10 *
- (10000000 / usec_elapsed);
-
- DBG("start[%ld] end[%ld]\n", start_count, end_count);
-
- return pll_input;
-}
-
-#ifdef CONFIG_PPC_PMAC
-static void apple_kiwi_init(struct pci_dev *pdev)
-{
- struct device_node *np = pci_device_to_OF_node(pdev);
- u8 conf;
-
- if (np == NULL || !of_device_is_compatible(np, "kiwi-root"))
- return;
-
- if (pdev->revision >= 0x03) {
- /* Setup chip magic config stuff (from darwin) */
- pci_read_config_byte (pdev, 0x40, &conf);
- pci_write_config_byte(pdev, 0x40, (conf | 0x01));
- }
-}
-#endif /* CONFIG_PPC_PMAC */
-
-static int init_chipset_pdcnew(struct pci_dev *dev)
-{
- const char *name = DRV_NAME;
- unsigned long dma_base = pci_resource_start(dev, 4);
- unsigned long sec_dma_base = dma_base + 0x08;
- long pll_input, pll_output, ratio;
- int f, r;
- u8 pll_ctl0, pll_ctl1;
-
- if (dma_base == 0)
- return -EFAULT;
-
-#ifdef CONFIG_PPC_PMAC
- apple_kiwi_init(dev);
-#endif
-
- /* Calculate the required PLL output frequency */
- switch(max_dma_rate(dev)) {
- case 4: /* it's 133 MHz for Ultra133 chips */
- pll_output = 133333333;
- break;
- case 3: /* and 100 MHz for Ultra100 chips */
- default:
- pll_output = 100000000;
- break;
- }
-
- /*
- * Detect PLL input clock.
- * On some systems, where the PCI bus is running at a non-standard clock
- * rate (e.g. 25 or 40 MHz), we have to adjust the cycle time.
- * PDC20268 and newer chips employ a PLL circuit to help correct the
- * timing register settings.
- */
- pll_input = detect_pll_input_clock(dma_base);
- printk(KERN_INFO "%s %s: PLL input clock is %ld kHz\n",
- name, pci_name(dev), pll_input / 1000);
-
- /* Sanity check */
- if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) {
- printk(KERN_ERR "%s %s: Bad PLL input clock %ld Hz, giving up!"
- "\n", name, pci_name(dev), pll_input);
- goto out;
- }
-
-#ifdef DEBUG
- DBG("pll_output is %ld Hz\n", pll_output);
-
- /* Show the current clock value of PLL control register
- * (maybe already configured by the BIOS)
- */
- outb(0x02, sec_dma_base + 0x01);
- pll_ctl0 = inb(sec_dma_base + 0x03);
- outb(0x03, sec_dma_base + 0x01);
- pll_ctl1 = inb(sec_dma_base + 0x03);
-
- DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
-#endif
-
- /*
- * Calculate the ratio of F, R and NO
- * POUT = (F + 2) / ((R + 2) * NO)
- */
- ratio = pll_output / (pll_input / 1000);
- if (ratio < 8600L) { /* 8.6x */
- /* Using NO = 0x01, R = 0x0d */
- r = 0x0d;
- } else if (ratio < 12900L) { /* 12.9x */
- /* Using NO = 0x01, R = 0x08 */
- r = 0x08;
- } else if (ratio < 16100L) { /* 16.1x */
- /* Using NO = 0x01, R = 0x06 */
- r = 0x06;
- } else if (ratio < 64000L) { /* 64x */
- r = 0x00;
- } else {
- /* Invalid ratio */
- printk(KERN_ERR "%s %s: Bad ratio %ld, giving up!\n",
- name, pci_name(dev), ratio);
- goto out;
- }
-
- f = (ratio * (r + 2)) / 1000 - 2;
-
- DBG("F[%d] R[%d] ratio*1000[%ld]\n", f, r, ratio);
-
- if (unlikely(f < 0 || f > 127)) {
- /* Invalid F */
- printk(KERN_ERR "%s %s: F[%d] invalid!\n",
- name, pci_name(dev), f);
- goto out;
- }
-
- pll_ctl0 = (u8) f;
- pll_ctl1 = (u8) r;
-
- DBG("Writing pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
-
- outb(0x02, sec_dma_base + 0x01);
- outb(pll_ctl0, sec_dma_base + 0x03);
- outb(0x03, sec_dma_base + 0x01);
- outb(pll_ctl1, sec_dma_base + 0x03);
-
- /* Wait for the PLL circuit to stabilize */
- mdelay(30);
-
-#ifdef DEBUG
- /*
- * Show the current clock value of PLL control register
- */
- outb(0x02, sec_dma_base + 0x01);
- pll_ctl0 = inb(sec_dma_base + 0x03);
- outb(0x03, sec_dma_base + 0x01);
- pll_ctl1 = inb(sec_dma_base + 0x03);
-
- DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
-#endif
-
- out:
- return 0;
-}
-
-static struct pci_dev *pdc20270_get_dev2(struct pci_dev *dev)
-{
- struct pci_dev *dev2;
-
- dev2 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn) + 1,
- PCI_FUNC(dev->devfn)));
-
- if (dev2 &&
- dev2->vendor == dev->vendor &&
- dev2->device == dev->device) {
-
- if (dev2->irq != dev->irq) {
- dev2->irq = dev->irq;
- printk(KERN_INFO DRV_NAME " %s: PCI config space "
- "interrupt fixed\n", pci_name(dev));
- }
-
- return dev2;
- }
-
- return NULL;
-}
-
-static const struct ide_port_ops pdcnew_port_ops = {
- .set_pio_mode = pdcnew_set_pio_mode,
- .set_dma_mode = pdcnew_set_dma_mode,
- .resetproc = pdcnew_reset,
- .cable_detect = pdcnew_cable_detect,
-};
-
-#define DECLARE_PDCNEW_DEV(udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_pdcnew, \
- .port_ops = &pdcnew_port_ops, \
- .host_flags = IDE_HFLAG_POST_SET_MODE | \
- IDE_HFLAG_ERROR_STOPS_FIFO | \
- IDE_HFLAG_OFF_BOARD, \
- .pio_mask = ATA_PIO4, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- }
-
-static const struct ide_port_info pdcnew_chipsets[] = {
- /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
- /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
-};
-
-/**
- * pdc202new_init_one - called when a pdc202xx is found
- * @dev: the pdc202new device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
- struct pci_dev *bridge = dev->bus->self;
-
- if (dev->device == PCI_DEVICE_ID_PROMISE_20270 && bridge &&
- bridge->vendor == PCI_VENDOR_ID_DEC &&
- bridge->device == PCI_DEVICE_ID_DEC_21150) {
- struct pci_dev *dev2;
-
- if (PCI_SLOT(dev->devfn) & 2)
- return -ENODEV;
-
- dev2 = pdc20270_get_dev2(dev);
-
- if (dev2) {
- int ret = ide_pci_init_two(dev, dev2, d, NULL);
- if (ret < 0)
- pci_dev_put(dev2);
- return ret;
- }
- }
-
- if (dev->device == PCI_DEVICE_ID_PROMISE_20276 && bridge &&
- bridge->vendor == PCI_VENDOR_ID_INTEL &&
- (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
- bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
- printk(KERN_INFO DRV_NAME " %s: attached to I2O RAID controller,"
- " skipping\n", pci_name(dev));
- return -ENODEV;
- }
-
- return ide_pci_init_one(dev, d, NULL);
-}
-
-static void pdc202new_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
-
- ide_pci_remove(dev);
- pci_dev_put(dev2);
-}
-
-static const struct pci_device_id pdc202new_pci_tbl[] = {
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), 0 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), 0 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl);
-
-static struct pci_driver pdc202new_pci_driver = {
- .name = "Promise_IDE",
- .id_table = pdc202new_pci_tbl,
- .probe = pdc202new_init_one,
- .remove = pdc202new_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init pdc202new_ide_init(void)
-{
- return ide_pci_register_driver(&pdc202new_pci_driver);
-}
-
-static void __exit pdc202new_ide_exit(void)
-{
- pci_unregister_driver(&pdc202new_pci_driver);
-}
-
-module_init(pdc202new_ide_init);
-module_exit(pdc202new_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
-MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
deleted file mode 100644
index 5248ac064e6e..000000000000
--- a/drivers/ide/pdc202xx_old.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * Portions Copyright (C) 1999 Promise Technology, Inc.
- * Author: Frank Tiernan (frankt@promise.com)
- * Released under terms of General Public License
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "pdc202xx_old"
-
-static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 drive_pci = 0x60 + (drive->dn << 2);
- const u8 speed = drive->dma_mode;
-
- u8 AP = 0, BP = 0, CP = 0;
- u8 TA = 0, TB = 0, TC = 0;
-
- pci_read_config_byte(dev, drive_pci, &AP);
- pci_read_config_byte(dev, drive_pci + 1, &BP);
- pci_read_config_byte(dev, drive_pci + 2, &CP);
-
- switch(speed) {
- case XFER_UDMA_5:
- case XFER_UDMA_4: TB = 0x20; TC = 0x01; break;
- case XFER_UDMA_2: TB = 0x20; TC = 0x01; break;
- case XFER_UDMA_3:
- case XFER_UDMA_1: TB = 0x40; TC = 0x02; break;
- case XFER_UDMA_0:
- case XFER_MW_DMA_2: TB = 0x60; TC = 0x03; break;
- case XFER_MW_DMA_1: TB = 0x60; TC = 0x04; break;
- case XFER_MW_DMA_0: TB = 0xE0; TC = 0x0F; break;
- case XFER_PIO_4: TA = 0x01; TB = 0x04; break;
- case XFER_PIO_3: TA = 0x02; TB = 0x06; break;
- case XFER_PIO_2: TA = 0x03; TB = 0x08; break;
- case XFER_PIO_1: TA = 0x05; TB = 0x0C; break;
- case XFER_PIO_0:
- default: TA = 0x09; TB = 0x13; break;
- }
-
- if (speed < XFER_SW_DMA_0) {
- /*
- * preserve SYNC_INT / ERDDY_EN bits while clearing
- * Prefetch_EN / IORDY_EN / PA[3:0] bits of register A
- */
- AP &= ~0x3f;
- if (ide_pio_need_iordy(drive, speed - XFER_PIO_0))
- AP |= 0x20; /* set IORDY_EN bit */
- if (drive->media == ide_disk)
- AP |= 0x10; /* set Prefetch_EN bit */
- /* clear PB[4:0] bits of register B */
- BP &= ~0x1f;
- pci_write_config_byte(dev, drive_pci, AP | TA);
- pci_write_config_byte(dev, drive_pci + 1, BP | TB);
- } else {
- /* clear MB[2:0] bits of register B */
- BP &= ~0xe0;
- /* clear MC[3:0] bits of register C */
- CP &= ~0x0f;
- pci_write_config_byte(dev, drive_pci + 1, BP | TB);
- pci_write_config_byte(dev, drive_pci + 2, CP | TC);
- }
-}
-
-static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- pdc202xx_set_mode(hwif, drive);
-}
-
-static int pdc202xx_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long high_16 = pci_resource_start(dev, 4);
- u8 sc1d = inb(high_16 + 0x1d);
-
- if (hwif->channel) {
- /*
- * bit 7: error, bit 6: interrupting,
- * bit 5: FIFO full, bit 4: FIFO empty
- */
- return (sc1d & 0x40) ? 1 : 0;
- } else {
- /*
- * bit 3: error, bit 2: interrupting,
- * bit 1: FIFO full, bit 0: FIFO empty
- */
- return (sc1d & 0x04) ? 1 : 0;
- }
-}
-
-static u8 pdc2026x_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
-
- pci_read_config_word(dev, 0x50, &CIS);
-
- return (CIS & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-/*
- * Set the control register to use the 66MHz system
- * clock for UDMA 3/4/5 mode operation when necessary.
- *
- * FIXME: this register is shared by both channels, some locking is needed
- *
- * It may also be possible to leave the 66MHz clock on
- * and readjust the timing parameters.
- */
-static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
-{
- unsigned long clock_reg = hwif->extra_base + 0x01;
- u8 clock = inb(clock_reg);
-
- outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg);
-}
-
-static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
-{
- unsigned long clock_reg = hwif->extra_base + 0x01;
- u8 clock = inb(clock_reg);
-
- outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
-}
-
-static void pdc2026x_init_hwif(ide_hwif_t *hwif)
-{
- pdc_old_disable_66MHz_clock(hwif);
-}
-
-static void pdc202xx_dma_start(ide_drive_t *drive)
-{
- if (drive->current_speed > XFER_UDMA_2)
- pdc_old_enable_66MHz_clock(drive->hwif);
- if (drive->media != ide_disk || (drive->dev_flags & IDE_DFLAG_LBA48)) {
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- unsigned long high_16 = hwif->extra_base - 16;
- unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
- u32 word_count = 0;
- u8 clock = inb(high_16 + 0x11);
-
- outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
- word_count = (blk_rq_sectors(rq) << 8);
- word_count = (rq_data_dir(rq) == READ) ?
- word_count | 0x05000000 :
- word_count | 0x06000000;
- outl(word_count, atapi_reg);
- }
- ide_dma_start(drive);
-}
-
-static int pdc202xx_dma_end(ide_drive_t *drive)
-{
- if (drive->media != ide_disk || (drive->dev_flags & IDE_DFLAG_LBA48)) {
- ide_hwif_t *hwif = drive->hwif;
- unsigned long high_16 = hwif->extra_base - 16;
- unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20);
- u8 clock = 0;
-
- outl(0, atapi_reg); /* zero out extra */
- clock = inb(high_16 + 0x11);
- outb(clock & ~(hwif->channel ? 0x08:0x02), high_16 + 0x11);
- }
- if (drive->current_speed > XFER_UDMA_2)
- pdc_old_disable_66MHz_clock(drive->hwif);
- return ide_dma_end(drive);
-}
-
-static int init_chipset_pdc202xx(struct pci_dev *dev)
-{
- unsigned long dmabase = pci_resource_start(dev, 4);
- u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
-
- if (dmabase == 0)
- goto out;
-
- udma_speed_flag = inb(dmabase | 0x1f);
- primary_mode = inb(dmabase | 0x1a);
- secondary_mode = inb(dmabase | 0x1b);
- printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
- "Primary %s Mode " \
- "Secondary %s Mode.\n", pci_name(dev),
- (udma_speed_flag & 1) ? "EN" : "DIS",
- (primary_mode & 1) ? "MASTER" : "PCI",
- (secondary_mode & 1) ? "MASTER" : "PCI" );
-
- if (!(udma_speed_flag & 1)) {
- printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
- pci_name(dev), udma_speed_flag,
- (udma_speed_flag|1));
- outb(udma_speed_flag | 1, dmabase | 0x1f);
- printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
- }
-out:
- return 0;
-}
-
-static void pdc202ata4_fixup_irq(struct pci_dev *dev, const char *name)
-{
- if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
- u8 irq = 0, irq2 = 0;
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
- /* 0xbc */
- pci_read_config_byte(dev, (PCI_INTERRUPT_LINE)|0x80, &irq2);
- if (irq != irq2) {
- pci_write_config_byte(dev,
- (PCI_INTERRUPT_LINE)|0x80, irq); /* 0xbc */
- printk(KERN_INFO "%s %s: PCI config space interrupt "
- "mirror fixed\n", name, pci_name(dev));
- }
- }
-}
-
-#define IDE_HFLAGS_PDC202XX \
- (IDE_HFLAG_ERROR_STOPS_FIFO | \
- IDE_HFLAG_OFF_BOARD)
-
-static const struct ide_port_ops pdc20246_port_ops = {
- .set_pio_mode = pdc202xx_set_pio_mode,
- .set_dma_mode = pdc202xx_set_mode,
- .test_irq = pdc202xx_test_irq,
-};
-
-static const struct ide_port_ops pdc2026x_port_ops = {
- .set_pio_mode = pdc202xx_set_pio_mode,
- .set_dma_mode = pdc202xx_set_mode,
- .test_irq = pdc202xx_test_irq,
- .cable_detect = pdc2026x_cable_detect,
-};
-
-static const struct ide_dma_ops pdc2026x_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = pdc202xx_dma_start,
- .dma_end = pdc202xx_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-#define DECLARE_PDC2026X_DEV(udma, sectors) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_pdc202xx, \
- .init_hwif = pdc2026x_init_hwif, \
- .port_ops = &pdc2026x_port_ops, \
- .dma_ops = &pdc2026x_dma_ops, \
- .host_flags = IDE_HFLAGS_PDC202XX, \
- .pio_mask = ATA_PIO4, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = udma, \
- .max_sectors = sectors, \
- }
-
-static const struct ide_port_info pdc202xx_chipsets[] = {
- { /* 0: PDC20246 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_pdc202xx,
- .port_ops = &pdc20246_port_ops,
- .dma_ops = &sff_dma_ops,
- .host_flags = IDE_HFLAGS_PDC202XX,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
- },
-
- /* 1: PDC2026{2,3} */
- DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
- /* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
- DECLARE_PDC2026X_DEV(ATA_UDMA5, 256),
-};
-
-/**
- * pdc202xx_init_one - called when a PDC202xx is found
- * @dev: the pdc202xx device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int pdc202xx_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- const struct ide_port_info *d;
- u8 idx = id->driver_data;
-
- d = &pdc202xx_chipsets[idx];
-
- if (idx < 2)
- pdc202ata4_fixup_irq(dev, d->name);
-
- if (dev->vendor == PCI_DEVICE_ID_PROMISE_20265) {
- struct pci_dev *bridge = dev->bus->self;
-
- if (bridge &&
- bridge->vendor == PCI_VENDOR_ID_INTEL &&
- (bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
- bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
- printk(KERN_INFO DRV_NAME " %s: skipping Promise "
- "PDC20265 attached to I2O RAID controller\n",
- pci_name(dev));
- return -ENODEV;
- }
- }
-
- return ide_pci_init_one(dev, d, NULL);
-}
-
-static const struct pci_device_id pdc202xx_pci_tbl[] = {
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
- { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, pdc202xx_pci_tbl);
-
-static struct pci_driver pdc202xx_pci_driver = {
- .name = "Promise_Old_IDE",
- .id_table = pdc202xx_pci_tbl,
- .probe = pdc202xx_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init pdc202xx_ide_init(void)
-{
- return ide_pci_register_driver(&pdc202xx_pci_driver);
-}
-
-static void __exit pdc202xx_ide_exit(void)
-{
- pci_unregister_driver(&pdc202xx_pci_driver);
-}
-
-module_init(pdc202xx_ide_init);
-module_exit(pdc202xx_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
deleted file mode 100644
index a671cead6ae7..000000000000
--- a/drivers/ide/piix.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Documentation:
- *
- * Publicly available from the Intel web site. Errata documentation
- * is also publicly available. As an aid to anyone hacking on this
- * driver, the list of relevant errata is below, going back to
- * PIIX4. Older device documentation is now a bit tricky to find.
- *
- * Errata of note:
- *
- * Unfixable
- * PIIX4 errata #9 - Only on ultra obscure hw
- * ICH3 errata #13 - Not observed to affect real hw
- * by Intel
- *
- * Things we must deal with
- * PIIX4 errata #10 - BM IDE hang with non UDMA
- * (must stop/start dma to recover)
- * 440MX errata #15 - As PIIX4 errata #10
- * PIIX4 errata #15 - Must not read control registers
- * during a PIO transfer
- * 440MX errata #13 - As PIIX4 errata #15
- * ICH2 errata #21 - DMA mode 0 doesn't work right
- * ICH0/1 errata #55 - As ICH2 errata #21
- * ICH2 spec c #9 - Extra operations needed to handle
- * drive hotswap [NOT YET SUPPORTED]
- * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
- * and must be dword aligned
- * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
- *
- * Should have been BIOS fixed:
- * 450NX: errata #19 - DMA hangs on old 450NX
- * 450NX: errata #20 - DMA hangs on old 450NX
- * 450NX: errata #25 - Corruption with DMA on old 450NX
- * ICH3 errata #15 - IDE deadlock under high load
- * (BIOS must set dev 31 fn 0 bit 23)
- * ICH3 errata #18 - Don't use native mode
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "piix"
-
-static int no_piix_dma;
-
-/**
- * piix_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Set the interface PIO mode based upon the settings done by AMI BIOS.
- */
-
-static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int is_slave = drive->dn & 1;
- int master_port = hwif->channel ? 0x42 : 0x40;
- int slave_port = 0x44;
- unsigned long flags;
- u16 master_data;
- u8 slave_data;
- static DEFINE_SPINLOCK(tune_lock);
- int control = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /* ISP RTC */
- static const u8 timings[][2]= {
- { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- /*
- * Master vs slave is synchronized above us but the slave register is
- * shared by the two hwifs so the corner case of two slave timeouts in
- * parallel must be locked.
- */
- spin_lock_irqsave(&tune_lock, flags);
- pci_read_config_word(dev, master_port, &master_data);
-
- if (pio > 1)
- control |= 1; /* Programmable timing on */
- if (drive->media == ide_disk)
- control |= 4; /* Prefetch, post write */
- if (ide_pio_need_iordy(drive, pio))
- control |= 2; /* IORDY */
- if (is_slave) {
- master_data |= 0x4000;
- master_data &= ~0x0070;
- if (pio > 1) {
- /* Set PPE, IE and TIME */
- master_data |= control << 4;
- }
- pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data &= hwif->channel ? 0x0f : 0xf0;
- slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
- (hwif->channel ? 4 : 0);
- } else {
- master_data &= ~0x3307;
- if (pio > 1) {
- /* enable PPE, IE and TIME */
- master_data |= control;
- }
- master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
- }
- pci_write_config_word(dev, master_port, master_data);
- if (is_slave)
- pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&tune_lock, flags);
-}
-
-/**
- * piix_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Set a PIIX host controller to the desired DMA mode. This involves
- * programming the right timing data into the PCI configuration space.
- */
-
-static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 maslave = hwif->channel ? 0x42 : 0x40;
- int a_speed = 3 << (drive->dn * 4);
- int u_flag = 1 << drive->dn;
- int v_flag = 0x01 << drive->dn;
- int w_flag = 0x10 << drive->dn;
- int u_speed = 0;
- int sitre;
- u16 reg4042, reg4a;
- u8 reg48, reg54, reg55;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_word(dev, maslave, &reg4042);
- sitre = (reg4042 & 0x4000) ? 1 : 0;
- pci_read_config_byte(dev, 0x48, &reg48);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_read_config_byte(dev, 0x54, &reg54);
- pci_read_config_byte(dev, 0x55, &reg55);
-
- if (speed >= XFER_UDMA_0) {
- u8 udma = speed - XFER_UDMA_0;
-
- u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
-
- if (!(reg48 & u_flag))
- pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- if (speed == XFER_UDMA_5) {
- pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
- } else {
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
- }
- if ((reg4a & a_speed) != u_speed)
- pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
- if (speed > XFER_UDMA_2) {
- if (!(reg54 & v_flag))
- pci_write_config_byte(dev, 0x54, reg54 | v_flag);
- } else
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- if (reg48 & u_flag)
- pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- if (reg54 & v_flag)
- pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
- if (reg55 & w_flag)
- pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
-
- if (speed >= XFER_MW_DMA_0)
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- else
- drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
-
- piix_set_pio_mode(hwif, drive);
- }
-}
-
-/**
- * init_chipset_ich - set up the ICH chipset
- * @dev: PCI device to set up
- *
- * Initialize the PCI device as required. For the ICH this turns
- * out to be nice and simple.
- */
-
-static int init_chipset_ich(struct pci_dev *dev)
-{
- u32 extra = 0;
-
- pci_read_config_dword(dev, 0x54, &extra);
- pci_write_config_dword(dev, 0x54, extra | 0x400);
-
- return 0;
-}
-
-/**
- * ich_clear_irq - clear BMDMA status
- * @drive: IDE drive
- *
- * ICHx controllers set DMA INTR regardless of DMA or PIO.
- * BMDMA status might need to be cleared even for
- * PIO interrupts to prevent spurious/lost IRQ.
- */
-static void ich_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat;
-
- /*
- * ide_dma_end() needs BMDMA status for error checking.
- * So, skip clearing BMDMA status here and leave it
- * to ide_dma_end() if this is DMA interrupt.
- */
- if (drive->waiting_for_dma || hwif->dma_base == 0)
- return;
-
- /* clear the INTR & ERROR bits */
- dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
- /* Should we force the bit as well ? */
- outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
-}
-
-struct ich_laptop {
- u16 device;
- u16 subvendor;
- u16 subdevice;
-};
-
-/*
- * List of laptops that use short cables rather than 80 wire
- */
-
-static const struct ich_laptop ich_laptop[] = {
- /* devid, subvendor, subdev */
- { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
- { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
- { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
- { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
- { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
- { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
- { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
- { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
- { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
- { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
- { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
- /* end marker */
- { 0, }
-};
-
-static u8 piix_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- const struct ich_laptop *lap = &ich_laptop[0];
- u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
-
- /* check for specials */
- while (lap->device) {
- if (lap->device == pdev->device &&
- lap->subvendor == pdev->subsystem_vendor &&
- lap->subdevice == pdev->subsystem_device) {
- return ATA_CBL_PATA40_SHORT;
- }
- lap++;
- }
-
- pci_read_config_byte(pdev, 0x54, &reg54h);
-
- return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-/**
- * init_hwif_piix - fill in the hwif for the PIIX
- * @hwif: IDE interface
- *
- * Set up the ide_hwif_t for the PIIX interface according to the
- * capabilities of the hardware.
- */
-
-static void init_hwif_piix(ide_hwif_t *hwif)
-{
- if (!hwif->dma_base)
- return;
-
- if (no_piix_dma)
- hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0;
-}
-
-static const struct ide_port_ops piix_port_ops = {
- .set_pio_mode = piix_set_pio_mode,
- .set_dma_mode = piix_set_dma_mode,
- .cable_detect = piix_cable_detect,
-};
-
-static const struct ide_port_ops ich_port_ops = {
- .set_pio_mode = piix_set_pio_mode,
- .set_dma_mode = piix_set_dma_mode,
- .clear_irq = ich_clear_irq,
- .cable_detect = piix_cable_detect,
-};
-
-#define DECLARE_PIIX_DEV(udma) \
- { \
- .name = DRV_NAME, \
- .init_hwif = init_hwif_piix, \
- .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
- .port_ops = &piix_port_ops, \
- .pio_mask = ATA_PIO4, \
- .swdma_mask = ATA_SWDMA2_ONLY, \
- .mwdma_mask = ATA_MWDMA12_ONLY, \
- .udma_mask = udma, \
- }
-
-#define DECLARE_ICH_DEV(mwdma, udma) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_ich, \
- .init_hwif = init_hwif_piix, \
- .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
- .port_ops = &ich_port_ops, \
- .pio_mask = ATA_PIO4, \
- .swdma_mask = ATA_SWDMA2_ONLY, \
- .mwdma_mask = mwdma, \
- .udma_mask = udma, \
- }
-
-static const struct ide_port_info piix_pci_info[] = {
- /* 0: MPIIX */
- { /*
- * MPIIX actually has only a single IDE channel mapped to
- * the primary or secondary ports depending on the value
- * of bit 14 of the IDETIM register at offset 0x6c
- */
- .name = DRV_NAME,
- .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
- .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
- /* This is a painful system; best to let it self-tune for now */
- },
- /* 1: PIIXa/PIIXb/PIIX3 */
- DECLARE_PIIX_DEV(0x00), /* no udma */
- /* 2: PIIX4 */
- DECLARE_PIIX_DEV(ATA_UDMA2),
- /* 3: ICH0 */
- DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
- /* 4: ICH */
- DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
- /* 5: PIIX4 */
- DECLARE_PIIX_DEV(ATA_UDMA4),
- /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
- DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
- /* 7: ICH7/7-R, no MWDMA1 */
- DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
-};
-
-/**
- * piix_init_one - called when a PIIX is found
- * @dev: the piix device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
-}
-
-/**
- * piix_check_450nx - Check for problem 450NX setup
- *
- * Check for the present of 450NX errata #19 and errata #25. If
- * they are found, disable use of DMA IDE
- */
-
-static void piix_check_450nx(void)
-{
- struct pci_dev *pdev = NULL;
- u16 cfg;
- while((pdev=pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL)
- {
- /* Look for 450NX PXB. Check for problem configurations.
- A PCI quirk checks bit 6 already. */
- pci_read_config_word(pdev, 0x41, &cfg);
- /* Only on the original revision: IDE DMA can hang */
- if (pdev->revision == 0x00)
- no_piix_dma = 1;
- /* On all revisions below 5 PXB bus lock must be disabled for IDE */
- else if (cfg & (1<<14) && pdev->revision < 5)
- no_piix_dma = 2;
- }
- if(no_piix_dma)
- printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
- if(no_piix_dma == 2)
- printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
-}
-
-static const struct pci_device_id piix_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 },
-#endif
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
-
-static struct pci_driver piix_pci_driver = {
- .name = "PIIX_IDE",
- .id_table = piix_pci_tbl,
- .probe = piix_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init piix_ide_init(void)
-{
- piix_check_450nx();
- return ide_pci_register_driver(&piix_pci_driver);
-}
-
-static void __exit piix_ide_exit(void)
-{
- pci_unregister_driver(&piix_pci_driver);
-}
-
-module_init(piix_ide_init);
-module_exit(piix_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
-MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
deleted file mode 100644
index ea0b064b5f56..000000000000
--- a/drivers/ide/pmac.c
+++ /dev/null
@@ -1,1703 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Support for IDE interfaces on PowerMacs.
- *
- * These IDE interfaces are memory-mapped and have a DBDMA channel
- * for doing DMA.
- *
- * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
- * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
- *
- * Some code taken from drivers/ide/ide-dma.c:
- *
- * Copyright (c) 1995-1998 Mark Lord
- *
- * TODO: - Use pre-calculated (kauai) timing tables all the time and
- * get rid of the "rounded" tables used previously, so we have the
- * same table format for all controllers and can then just have one
- * big table
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/notifier.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/dbdma.h>
-#include <asm/ide.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/sections.h>
-#include <asm/irq.h>
-#include <asm/mediabay.h>
-
-#define DRV_NAME "ide-pmac"
-
-#undef IDE_PMAC_DEBUG
-
-#define DMA_WAIT_TIMEOUT 50
-
-typedef struct pmac_ide_hwif {
- unsigned long regbase;
- int irq;
- int kind;
- int aapl_bus_id;
- unsigned broken_dma : 1;
- unsigned broken_dma_warn : 1;
- struct device_node* node;
- struct macio_dev *mdev;
- u32 timings[4];
- volatile u32 __iomem * *kauai_fcr;
- ide_hwif_t *hwif;
-
- /* Those fields are duplicating what is in hwif. We currently
- * can't use the hwif ones because of some assumptions that are
- * being made by the generic code about the kind of dma controller
- * and format of the dma table. This will have to be fixed though.
- */
- volatile struct dbdma_regs __iomem * dma_regs;
- struct dbdma_cmd* dma_table_cpu;
-} pmac_ide_hwif_t;
-
-enum {
- controller_ohare, /* OHare based */
- controller_heathrow, /* Heathrow/Paddington */
- controller_kl_ata3, /* KeyLargo ATA-3 */
- controller_kl_ata4, /* KeyLargo ATA-4 */
- controller_un_ata6, /* UniNorth2 ATA-6 */
- controller_k2_ata6, /* K2 ATA-6 */
- controller_sh_ata6, /* Shasta ATA-6 */
-};
-
-static const char* model_name[] = {
- "OHare ATA", /* OHare based */
- "Heathrow ATA", /* Heathrow/Paddington */
- "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
- "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
- "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
- "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
- "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
-};
-
-/*
- * Extra registers, both 32-bit little-endian
- */
-#define IDE_TIMING_CONFIG 0x200
-#define IDE_INTERRUPT 0x300
-
-/* Kauai (U2) ATA has different register setup */
-#define IDE_KAUAI_PIO_CONFIG 0x200
-#define IDE_KAUAI_ULTRA_CONFIG 0x210
-#define IDE_KAUAI_POLL_CONFIG 0x220
-
-/*
- * Timing configuration register definitions
- */
-
-/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
-#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
-#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
-#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
-#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
-
-/* 133Mhz cell, found in shasta.
- * See comments about 100 Mhz Uninorth 2...
- * Note that PIO_MASK and MDMA_MASK seem to overlap
- */
-#define TR_133_PIOREG_PIO_MASK 0xff000fff
-#define TR_133_PIOREG_MDMA_MASK 0x00fff800
-#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
-#define TR_133_UDMAREG_UDMA_EN 0x00000001
-
-/* 100Mhz cell, found in Uninorth 2. I don't have much info about
- * this one yet; it appears as a pci device (106b/0033) on the uninorth
- * internal PCI bus and its clock is controlled like gem or fw. It
- * appears to be an evolution of keylargo ATA4 with a timing register
- * extended to two 32-bit registers and a similar DBDMA channel. Other
- * registers seem to exist but I can't tell much about them.
- *
- * So far, I'm using pre-calculated tables for this extracted from
- * the values used by the MacOS X driver.
- *
- * The "PIO" register controls PIO and MDMA timings, the "ULTRA"
- * register controls the UDMA timings. At least, it seems bit 0
- * of this one enables UDMA vs. MDMA, and bits 4..7 are the
- * cycle time in units of 10ns. Bits 8..15 are used by I don't
- * know their meaning yet
- */
-#define TR_100_PIOREG_PIO_MASK 0xff000fff
-#define TR_100_PIOREG_MDMA_MASK 0x00fff000
-#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
-#define TR_100_UDMAREG_UDMA_EN 0x00000001
-
-
-/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
- * a 40 conductor cable and up to mode 4 on an 80 conductor one.
- * Clock unit is 15ns (66Mhz)
- *
- * 3 Values can be programmed:
- * - Write data setup, which appears to match the cycle time. They
- * also call it DIOW setup.
- * - Ready to pause time (from spec)
- * - Address setup. That one is weird. I don't see where exactly
- * it fits in UDMA cycles; I got its name from an obscure piece
- * of commented out code in Darwin. They leave it at 0, we do as
- * well, despite a comment that would lead one to think it has a
- * min value of 45ns.
- * Apple also adds 60ns to the write data setup (or cycle time ?) on
- * reads.
- */
-#define TR_66_UDMA_MASK 0xfff00000
-#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
-#define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
-#define TR_66_UDMA_ADDRSETUP_SHIFT 29
-#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
-#define TR_66_UDMA_RDY2PAUS_SHIFT 25
-#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
-#define TR_66_UDMA_WRDATASETUP_SHIFT 21
-#define TR_66_MDMA_MASK 0x000ffc00
-#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
-#define TR_66_MDMA_RECOVERY_SHIFT 15
-#define TR_66_MDMA_ACCESS_MASK 0x00007c00
-#define TR_66_MDMA_ACCESS_SHIFT 10
-#define TR_66_PIO_MASK 0x000003ff
-#define TR_66_PIO_RECOVERY_MASK 0x000003e0
-#define TR_66_PIO_RECOVERY_SHIFT 5
-#define TR_66_PIO_ACCESS_MASK 0x0000001f
-#define TR_66_PIO_ACCESS_SHIFT 0
-
-/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
- * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
- *
- * The access time and recovery time can be programmed. Some older
- * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
- * the same here for safety against broken old hardware ;)
- * The HalfTick bit, when set, adds half a clock (15ns) to the access
- * time and removes one from recovery. It's not supported on KeyLargo
- * implementation afaik. The E bit appears to be set for PIO mode 0 and
- * is used to reach long timings used in this mode.
- */
-#define TR_33_MDMA_MASK 0x003ff800
-#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
-#define TR_33_MDMA_RECOVERY_SHIFT 16
-#define TR_33_MDMA_ACCESS_MASK 0x0000f800
-#define TR_33_MDMA_ACCESS_SHIFT 11
-#define TR_33_MDMA_HALFTICK 0x00200000
-#define TR_33_PIO_MASK 0x000007ff
-#define TR_33_PIO_E 0x00000400
-#define TR_33_PIO_RECOVERY_MASK 0x000003e0
-#define TR_33_PIO_RECOVERY_SHIFT 5
-#define TR_33_PIO_ACCESS_MASK 0x0000001f
-#define TR_33_PIO_ACCESS_SHIFT 0
-
-/*
- * Interrupt register definitions
- */
-#define IDE_INTR_DMA 0x80000000
-#define IDE_INTR_DEVICE 0x40000000
-
-/*
- * FCR Register on Kauai. Not sure what bit 0x4 is ...
- */
-#define KAUAI_FCR_UATA_MAGIC 0x00000004
-#define KAUAI_FCR_UATA_RESET_N 0x00000002
-#define KAUAI_FCR_UATA_ENABLE 0x00000001
-
-/* Rounded Multiword DMA timings
- *
- * I gave up finding a generic formula for all controller
- * types and instead, built tables based on timing values
- * used by Apple in Darwin's implementation.
- */
-struct mdma_timings_t {
- int accessTime;
- int recoveryTime;
- int cycleTime;
-};
-
-struct mdma_timings_t mdma_timings_33[] =
-{
- { 240, 240, 480 },
- { 180, 180, 360 },
- { 135, 135, 270 },
- { 120, 120, 240 },
- { 105, 105, 210 },
- { 90, 90, 180 },
- { 75, 75, 150 },
- { 75, 45, 120 },
- { 0, 0, 0 }
-};
-
-struct mdma_timings_t mdma_timings_33k[] =
-{
- { 240, 240, 480 },
- { 180, 180, 360 },
- { 150, 150, 300 },
- { 120, 120, 240 },
- { 90, 120, 210 },
- { 90, 90, 180 },
- { 90, 60, 150 },
- { 90, 30, 120 },
- { 0, 0, 0 }
-};
-
-struct mdma_timings_t mdma_timings_66[] =
-{
- { 240, 240, 480 },
- { 180, 180, 360 },
- { 135, 135, 270 },
- { 120, 120, 240 },
- { 105, 105, 210 },
- { 90, 90, 180 },
- { 90, 75, 165 },
- { 75, 45, 120 },
- { 0, 0, 0 }
-};
-
-/* KeyLargo ATA-4 Ultra DMA timings (rounded) */
-struct {
- int addrSetup; /* ??? */
- int rdy2pause;
- int wrDataSetup;
-} kl66_udma_timings[] =
-{
- { 0, 180, 120 }, /* Mode 0 */
- { 0, 150, 90 }, /* 1 */
- { 0, 120, 60 }, /* 2 */
- { 0, 90, 45 }, /* 3 */
- { 0, 90, 30 } /* 4 */
-};
-
-/* UniNorth 2 ATA/100 timings */
-struct kauai_timing {
- int cycle_time;
- u32 timing_reg;
-};
-
-static struct kauai_timing kauai_pio_timings[] =
-{
- { 930 , 0x08000fff },
- { 600 , 0x08000a92 },
- { 383 , 0x0800060f },
- { 360 , 0x08000492 },
- { 330 , 0x0800048f },
- { 300 , 0x080003cf },
- { 270 , 0x080003cc },
- { 240 , 0x0800038b },
- { 239 , 0x0800030c },
- { 180 , 0x05000249 },
- { 120 , 0x04000148 },
- { 0 , 0 },
-};
-
-static struct kauai_timing kauai_mdma_timings[] =
-{
- { 1260 , 0x00fff000 },
- { 480 , 0x00618000 },
- { 360 , 0x00492000 },
- { 270 , 0x0038e000 },
- { 240 , 0x0030c000 },
- { 210 , 0x002cb000 },
- { 180 , 0x00249000 },
- { 150 , 0x00209000 },
- { 120 , 0x00148000 },
- { 0 , 0 },
-};
-
-static struct kauai_timing kauai_udma_timings[] =
-{
- { 120 , 0x000070c0 },
- { 90 , 0x00005d80 },
- { 60 , 0x00004a60 },
- { 45 , 0x00003a50 },
- { 30 , 0x00002a30 },
- { 20 , 0x00002921 },
- { 0 , 0 },
-};
-
-static struct kauai_timing shasta_pio_timings[] =
-{
- { 930 , 0x08000fff },
- { 600 , 0x0A000c97 },
- { 383 , 0x07000712 },
- { 360 , 0x040003cd },
- { 330 , 0x040003cd },
- { 300 , 0x040003cd },
- { 270 , 0x040003cd },
- { 240 , 0x040003cd },
- { 239 , 0x040003cd },
- { 180 , 0x0400028b },
- { 120 , 0x0400010a },
- { 0 , 0 },
-};
-
-static struct kauai_timing shasta_mdma_timings[] =
-{
- { 1260 , 0x00fff000 },
- { 480 , 0x00820800 },
- { 360 , 0x00820800 },
- { 270 , 0x00820800 },
- { 240 , 0x00820800 },
- { 210 , 0x00820800 },
- { 180 , 0x00820800 },
- { 150 , 0x0028b000 },
- { 120 , 0x001ca000 },
- { 0 , 0 },
-};
-
-static struct kauai_timing shasta_udma133_timings[] =
-{
- { 120 , 0x00035901, },
- { 90 , 0x000348b1, },
- { 60 , 0x00033881, },
- { 45 , 0x00033861, },
- { 30 , 0x00033841, },
- { 20 , 0x00033031, },
- { 15 , 0x00033021, },
- { 0 , 0 },
-};
-
-
-static inline u32
-kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
-{
- int i;
-
- for (i=0; table[i].cycle_time; i++)
- if (cycle_time > table[i+1].cycle_time)
- return table[i].timing_reg;
- BUG();
- return 0;
-}
-
-/* allow up to 256 DBDMA commands per xfer */
-#define MAX_DCMDS 256
-
-/*
- * Wait 1s for disk to answer on IDE bus after a hard reset
- * of the device (via GPIO/FCR).
- *
- * Some devices seem to "pollute" the bus even after dropping
- * the BSY bit (typically some combo drives configured as slave on
- * the UDMA bus) after a hard reset. Since we hard reset all drives on
- * KeyLargo ATA66, we have to keep that delay around. I may end
- * up not hard resetting anymore on these and keep the delay only
- * for older interfaces instead (we have to reset when coming
- * from MacOS...) --BenH.
- */
-#define IDE_WAKEUP_DELAY (1*HZ)
-
-static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
-
-#define PMAC_IDE_REG(x) \
- ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
-
-/*
- * Apply the timings of the proper unit (master/slave) to the shared
- * timing register when selecting that unit. This version is for
- * ASICs with a single timing register
- */
-static void pmac_ide_apply_timings(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (drive->dn & 1)
- writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
- else
- writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG));
- (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
-}
-
-/*
- * Apply the timings of the proper unit (master/slave) to the shared
- * timing register when selecting that unit. This version is for
- * ASICs with a dual timing register (Kauai)
- */
-static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (drive->dn & 1) {
- writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
- writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
- } else {
- writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
- writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
- }
- (void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
-}
-
-/*
- * Force an update of controller timing values for a given drive
- */
-static void
-pmac_ide_do_update_timings(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (pmif->kind == controller_sh_ata6 ||
- pmif->kind == controller_un_ata6 ||
- pmif->kind == controller_k2_ata6)
- pmac_ide_kauai_apply_timings(drive);
- else
- pmac_ide_apply_timings(drive);
-}
-
-static void pmac_dev_select(ide_drive_t *drive)
-{
- pmac_ide_apply_timings(drive);
-
- writeb(drive->select | ATA_DEVICE_OBS,
- (void __iomem *)drive->hwif->io_ports.device_addr);
-}
-
-static void pmac_kauai_dev_select(ide_drive_t *drive)
-{
- pmac_ide_kauai_apply_timings(drive);
-
- writeb(drive->select | ATA_DEVICE_OBS,
- (void __iomem *)drive->hwif->io_ports.device_addr);
-}
-
-static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
- writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
- (void)readl((void __iomem *)(hwif->io_ports.data_addr
- + IDE_TIMING_CONFIG));
-}
-
-static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
- writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
- (void)readl((void __iomem *)(hwif->io_ports.data_addr
- + IDE_TIMING_CONFIG));
-}
-
-/*
- * Old tuning function (called on hdparm -p); sets up drive PIO timings
- */
-static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
- u32 *timings, t;
- unsigned accessTicks, recTicks;
- unsigned accessTime, recTime;
- unsigned int cycle_time;
-
- /* which drive is it ? */
- timings = &pmif->timings[drive->dn & 1];
- t = *timings;
-
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- switch (pmif->kind) {
- case controller_sh_ata6: {
- /* 133MHz cell */
- u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
- t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
- break;
- }
- case controller_un_ata6:
- case controller_k2_ata6: {
- /* 100MHz cell */
- u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
- t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
- break;
- }
- case controller_kl_ata4:
- /* 66MHz cell */
- recTime = cycle_time - tim->active - tim->setup;
- recTime = max(recTime, 150U);
- accessTime = tim->active;
- accessTime = max(accessTime, 150U);
- accessTicks = SYSCLK_TICKS_66(accessTime);
- accessTicks = min(accessTicks, 0x1fU);
- recTicks = SYSCLK_TICKS_66(recTime);
- recTicks = min(recTicks, 0x1fU);
- t = (t & ~TR_66_PIO_MASK) |
- (accessTicks << TR_66_PIO_ACCESS_SHIFT) |
- (recTicks << TR_66_PIO_RECOVERY_SHIFT);
- break;
- default: {
- /* 33MHz cell */
- int ebit = 0;
- recTime = cycle_time - tim->active - tim->setup;
- recTime = max(recTime, 150U);
- accessTime = tim->active;
- accessTime = max(accessTime, 150U);
- accessTicks = SYSCLK_TICKS(accessTime);
- accessTicks = min(accessTicks, 0x1fU);
- accessTicks = max(accessTicks, 4U);
- recTicks = SYSCLK_TICKS(recTime);
- recTicks = min(recTicks, 0x1fU);
- recTicks = max(recTicks, 5U) - 4;
- if (recTicks > 9) {
- recTicks--; /* guess, but it's only for PIO0, so... */
- ebit = 1;
- }
- t = (t & ~TR_33_PIO_MASK) |
- (accessTicks << TR_33_PIO_ACCESS_SHIFT) |
- (recTicks << TR_33_PIO_RECOVERY_SHIFT);
- if (ebit)
- t |= TR_33_PIO_E;
- break;
- }
- }
-
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
- drive->name, pio, *timings);
-#endif
-
- *timings = t;
- pmac_ide_do_update_timings(drive);
-}
-
-/*
- * Calculate KeyLargo ATA/66 UDMA timings
- */
-static int
-set_timings_udma_ata4(u32 *timings, u8 speed)
-{
- unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
-
- if (speed > XFER_UDMA_4)
- return 1;
-
- rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
- wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
- addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);
-
- *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
- (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
- (rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
- (addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
- TR_66_UDMA_EN;
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
- speed & 0xf, *timings);
-#endif
-
- return 0;
-}
-
-/*
- * Calculate Kauai ATA/100 UDMA timings
- */
-static int
-set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
-{
- struct ide_timing *t = ide_timing_find_mode(speed);
- u32 tr;
-
- if (speed > XFER_UDMA_5 || t == NULL)
- return 1;
- tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
- *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
- *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
-
- return 0;
-}
-
-/*
- * Calculate Shasta ATA/133 UDMA timings
- */
-static int
-set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
-{
- struct ide_timing *t = ide_timing_find_mode(speed);
- u32 tr;
-
- if (speed > XFER_UDMA_6 || t == NULL)
- return 1;
- tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
- *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr;
- *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN;
-
- return 0;
-}
-
-/*
- * Calculate MDMA timings for all cells
- */
-static void
-set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
- u8 speed)
-{
- u16 *id = drive->id;
- int cycleTime, accessTime = 0, recTime = 0;
- unsigned accessTicks, recTicks;
- struct mdma_timings_t* tm = NULL;
- int i;
-
- /* Get default cycle time for mode */
- switch(speed & 0xf) {
- case 0: cycleTime = 480; break;
- case 1: cycleTime = 150; break;
- case 2: cycleTime = 120; break;
- default:
- BUG();
- break;
- }
-
- /* Check if drive provides explicit DMA cycle time */
- if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME])
- cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime);
-
- /* OHare limits according to some old Apple sources */
- if ((intf_type == controller_ohare) && (cycleTime < 150))
- cycleTime = 150;
- /* Get the proper timing array for this controller */
- switch(intf_type) {
- case controller_sh_ata6:
- case controller_un_ata6:
- case controller_k2_ata6:
- break;
- case controller_kl_ata4:
- tm = mdma_timings_66;
- break;
- case controller_kl_ata3:
- tm = mdma_timings_33k;
- break;
- default:
- tm = mdma_timings_33;
- break;
- }
- if (tm != NULL) {
- /* Lookup matching access & recovery times */
- i = -1;
- for (;;) {
- if (tm[i+1].cycleTime < cycleTime)
- break;
- i++;
- }
- cycleTime = tm[i].cycleTime;
- accessTime = tm[i].accessTime;
- recTime = tm[i].recoveryTime;
-
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
- drive->name, cycleTime, accessTime, recTime);
-#endif
- }
- switch(intf_type) {
- case controller_sh_ata6: {
- /* 133MHz cell */
- u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime);
- *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
- *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
- }
- break;
- case controller_un_ata6:
- case controller_k2_ata6: {
- /* 100MHz cell */
- u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
- *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
- *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
- }
- break;
- case controller_kl_ata4:
- /* 66MHz cell */
- accessTicks = SYSCLK_TICKS_66(accessTime);
- accessTicks = min(accessTicks, 0x1fU);
- accessTicks = max(accessTicks, 0x1U);
- recTicks = SYSCLK_TICKS_66(recTime);
- recTicks = min(recTicks, 0x1fU);
- recTicks = max(recTicks, 0x3U);
- /* Clear out mdma bits and disable udma */
- *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
- (accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
- (recTicks << TR_66_MDMA_RECOVERY_SHIFT);
- break;
- case controller_kl_ata3:
- /* 33MHz cell on KeyLargo */
- accessTicks = SYSCLK_TICKS(accessTime);
- accessTicks = max(accessTicks, 1U);
- accessTicks = min(accessTicks, 0x1fU);
- accessTime = accessTicks * IDE_SYSCLK_NS;
- recTicks = SYSCLK_TICKS(recTime);
- recTicks = max(recTicks, 1U);
- recTicks = min(recTicks, 0x1fU);
- *timings = ((*timings) & ~TR_33_MDMA_MASK) |
- (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
- (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
- break;
- default: {
- /* 33MHz cell on others */
- int halfTick = 0;
- int origAccessTime = accessTime;
- int origRecTime = recTime;
-
- accessTicks = SYSCLK_TICKS(accessTime);
- accessTicks = max(accessTicks, 1U);
- accessTicks = min(accessTicks, 0x1fU);
- accessTime = accessTicks * IDE_SYSCLK_NS;
- recTicks = SYSCLK_TICKS(recTime);
- recTicks = max(recTicks, 2U) - 1;
- recTicks = min(recTicks, 0x1fU);
- recTime = (recTicks + 1) * IDE_SYSCLK_NS;
- if ((accessTicks > 1) &&
- ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
- ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
- halfTick = 1;
- accessTicks--;
- }
- *timings = ((*timings) & ~TR_33_MDMA_MASK) |
- (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
- (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
- if (halfTick)
- *timings |= TR_33_MDMA_HALFTICK;
- }
- }
-#ifdef IDE_PMAC_DEBUG
- printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
- drive->name, speed & 0xf, *timings);
-#endif
-}
-
-static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- int ret = 0;
- u32 *timings, *timings2, tl[2];
- u8 unit = drive->dn & 1;
- const u8 speed = drive->dma_mode;
-
- timings = &pmif->timings[unit];
- timings2 = &pmif->timings[unit+2];
-
- /* Copy timings to local image */
- tl[0] = *timings;
- tl[1] = *timings2;
-
- if (speed >= XFER_UDMA_0) {
- if (pmif->kind == controller_kl_ata4)
- ret = set_timings_udma_ata4(&tl[0], speed);
- else if (pmif->kind == controller_un_ata6
- || pmif->kind == controller_k2_ata6)
- ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
- else if (pmif->kind == controller_sh_ata6)
- ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
- else
- ret = -1;
- } else
- set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-
- if (ret)
- return;
-
- /* Apply timings to controller */
- *timings = tl[0];
- *timings2 = tl[1];
-
- pmac_ide_do_update_timings(drive);
-}
-
-/*
- * Blast some well-known "safe" values to the timing registers at init or
- * wakeup-from-sleep time, before we do the real calculation
- */
-static void
-sanitize_timings(pmac_ide_hwif_t *pmif)
-{
- unsigned int value, value2 = 0;
-
- switch(pmif->kind) {
- case controller_sh_ata6:
- value = 0x0a820c97;
- value2 = 0x00033031;
- break;
- case controller_un_ata6:
- case controller_k2_ata6:
- value = 0x08618a92;
- value2 = 0x00002921;
- break;
- case controller_kl_ata4:
- value = 0x0008438c;
- break;
- case controller_kl_ata3:
- value = 0x00084526;
- break;
- case controller_heathrow:
- case controller_ohare:
- default:
- value = 0x00074526;
- break;
- }
- pmif->timings[0] = pmif->timings[1] = value;
- pmif->timings[2] = pmif->timings[3] = value2;
-}
-
-static int on_media_bay(pmac_ide_hwif_t *pmif)
-{
- return pmif->mdev && pmif->mdev->media_bay != NULL;
-}
-
-/* Suspend callback; should be called after the child devices
- * have actually been suspended
- */
-static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
-{
- /* We clear the timings */
- pmif->timings[0] = 0;
- pmif->timings[1] = 0;
-
- disable_irq(pmif->irq);
-
- /* The media bay will handle itself just fine */
- if (on_media_bay(pmif))
- return 0;
-
- /* Kauai has bus control FCRs directly here */
- if (pmif->kauai_fcr) {
- u32 fcr = readl(pmif->kauai_fcr);
- fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
- writel(fcr, pmif->kauai_fcr);
- }
-
- /* Disable the bus on older machines and the cell on kauai */
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id,
- 0);
-
- return 0;
-}
-
-/* Resume callback; should be called before the child devices
- * are resumed
- */
-static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
-{
- /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
- if (!on_media_bay(pmif)) {
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
- msleep(10);
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
-
- /* Kauai handles this differently */
- if (pmif->kauai_fcr) {
- u32 fcr = readl(pmif->kauai_fcr);
- fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE;
- writel(fcr, pmif->kauai_fcr);
- }
-
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
- }
-
- /* Sanitize drive timings */
- sanitize_timings(pmif);
-
- enable_irq(pmif->irq);
-
- return 0;
-}
-
-static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- struct device_node *np = pmif->node;
- const char *cable = of_get_property(np, "cable-type", NULL);
- struct device_node *root = of_find_node_by_path("/");
- const char *model = of_get_property(root, "model", NULL);
-
- of_node_put(root);
- /* Get cable type from device-tree. */
- if (cable && !strncmp(cable, "80-", 3)) {
- /* Some drives fail to detect the 80c cable in PowerBooks */
- /* These machines use a proprietary short IDE cable anyway */
- if (!strncmp(model, "PowerBook", 9))
- return ATA_CBL_PATA40_SHORT;
- else
- return ATA_CBL_PATA80;
- }
-
- /*
- * G5s seem to have an incorrect cable type in the device-tree.
- * Let's assume they have an 80-conductor cable; this seems
- * to always be the case unless the user mucked around.
- */
- if (of_device_is_compatible(np, "K2-UATA") ||
- of_device_is_compatible(np, "shasta-ata"))
- return ATA_CBL_PATA80;
-
- return ATA_CBL_PATA40;
-}
-
-static void pmac_ide_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
-
- if (on_media_bay(pmif)) {
- if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
- drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
- return;
- }
- drive->dev_flags |= IDE_DFLAG_NOPROBE;
- }
-}
-
-static const struct ide_tp_ops pmac_tp_ops = {
- .exec_command = pmac_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = pmac_write_devctl,
-
- .dev_select = pmac_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_tp_ops pmac_ata6_tp_ops = {
- .exec_command = pmac_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = pmac_write_devctl,
-
- .dev_select = pmac_kauai_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops pmac_ide_ata4_port_ops = {
- .init_dev = pmac_ide_init_dev,
- .set_pio_mode = pmac_ide_set_pio_mode,
- .set_dma_mode = pmac_ide_set_dma_mode,
- .cable_detect = pmac_ide_cable_detect,
-};
-
-static const struct ide_port_ops pmac_ide_port_ops = {
- .init_dev = pmac_ide_init_dev,
- .set_pio_mode = pmac_ide_set_pio_mode,
- .set_dma_mode = pmac_ide_set_dma_mode,
-};
-
-static const struct ide_dma_ops pmac_dma_ops;
-
-static const struct ide_port_info pmac_port_info = {
- .name = DRV_NAME,
- .init_dma = pmac_ide_init_dma,
- .chipset = ide_pmac,
- .tp_ops = &pmac_tp_ops,
- .port_ops = &pmac_ide_port_ops,
- .dma_ops = &pmac_dma_ops,
- .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_MMIO |
- IDE_HFLAG_UNMASK_IRQS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-/*
- * Set up, register & probe an IDE channel driven by this driver; this is
- * called by one of the two probe functions (macio or PCI).
- */
-static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
-{
- struct device_node *np = pmif->node;
- const int *bidp;
- struct ide_host *host;
- struct ide_hw *hws[] = { hw };
- struct ide_port_info d = pmac_port_info;
- int rc;
-
- pmif->broken_dma = pmif->broken_dma_warn = 0;
- if (of_device_is_compatible(np, "shasta-ata")) {
- pmif->kind = controller_sh_ata6;
- d.tp_ops = &pmac_ata6_tp_ops;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA6;
- } else if (of_device_is_compatible(np, "kauai-ata")) {
- pmif->kind = controller_un_ata6;
- d.tp_ops = &pmac_ata6_tp_ops;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA5;
- } else if (of_device_is_compatible(np, "K2-UATA")) {
- pmif->kind = controller_k2_ata6;
- d.tp_ops = &pmac_ata6_tp_ops;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA5;
- } else if (of_device_is_compatible(np, "keylargo-ata")) {
- if (of_node_name_eq(np, "ata-4")) {
- pmif->kind = controller_kl_ata4;
- d.port_ops = &pmac_ide_ata4_port_ops;
- d.udma_mask = ATA_UDMA4;
- } else
- pmif->kind = controller_kl_ata3;
- } else if (of_device_is_compatible(np, "heathrow-ata")) {
- pmif->kind = controller_heathrow;
- } else {
- pmif->kind = controller_ohare;
- pmif->broken_dma = 1;
- }
-
- bidp = of_get_property(np, "AAPL,bus-id", NULL);
- pmif->aapl_bus_id = bidp ? *bidp : 0;
-
- /* On Kauai-type controllers, we make sure the FCR is correct */
- if (pmif->kauai_fcr)
- writel(KAUAI_FCR_UATA_MAGIC |
- KAUAI_FCR_UATA_RESET_N |
- KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
-
- /* Make sure we have sane timings */
- sanitize_timings(pmif);
-
- /* If we are on a media bay, wait for it to settle and lock it */
- if (pmif->mdev)
- lock_media_bay(pmif->mdev->media_bay);
-
- host = ide_host_alloc(&d, hws, 1);
- if (host == NULL) {
- rc = -ENOMEM;
- goto bail;
- }
- pmif->hwif = host->ports[0];
-
- if (on_media_bay(pmif)) {
- /* Fixup bus ID for media bay */
- if (!bidp)
- pmif->aapl_bus_id = 1;
- } else if (pmif->kind == controller_ohare) {
- /* The code below has trouble on some OHare machines
- * (timing related?). Until I can get my hands on one of these
- * units, I keep the old way.
- */
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
- } else {
- /* This is necessary to enable IDE when net-booting */
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
- ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
- msleep(10);
- ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
- msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
- }
-
- printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
- "bus ID %d%s, irq %d\n", model_name[pmif->kind],
- pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
- on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
-
- rc = ide_host_register(host, &d, hws);
- if (rc)
- pmif->hwif = NULL;
-
- if (pmif->mdev)
- unlock_media_bay(pmif->mdev->media_bay);
-
- bail:
- if (rc && host)
- ide_host_free(host);
- return rc;
-}
-
-static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
-{
- int i;
-
- for (i = 0; i < 8; ++i)
- hw->io_ports_array[i] = base + i * 0x10;
-
- hw->io_ports.ctl_addr = base + 0x160;
-}
-
-/*
- * Attach to a macio probed interface
- */
-static int pmac_ide_macio_attach(struct macio_dev *mdev,
- const struct of_device_id *match)
-{
- void __iomem *base;
- unsigned long regbase;
- pmac_ide_hwif_t *pmif;
- int irq, rc;
- struct ide_hw hw;
-
- pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
- if (pmif == NULL)
- return -ENOMEM;
-
- if (macio_resource_count(mdev) == 0) {
- printk(KERN_WARNING "ide-pmac: no address for %pOF\n",
- mdev->ofdev.dev.of_node);
- rc = -ENXIO;
- goto out_free_pmif;
- }
-
- /* Request memory resource for IO ports */
- if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
- printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
- "%pOF!\n", mdev->ofdev.dev.of_node);
- rc = -EBUSY;
- goto out_free_pmif;
- }
-
- /* XXX This is bogus. Should be fixed in the registry by checking
- * the kind of host interrupt controller, a bit like gatwick
- * fixes in irq.c. That works well enough for the single case
- * where that happens though...
- */
- if (macio_irq_count(mdev) == 0) {
- printk(KERN_WARNING "ide-pmac: no intrs for device %pOF, using "
- "13\n", mdev->ofdev.dev.of_node);
- irq = irq_create_mapping(NULL, 13);
- } else
- irq = macio_irq(mdev, 0);
-
- base = ioremap(macio_resource_start(mdev, 0), 0x400);
- regbase = (unsigned long) base;
-
- pmif->mdev = mdev;
- pmif->node = mdev->ofdev.dev.of_node;
- pmif->regbase = regbase;
- pmif->irq = irq;
- pmif->kauai_fcr = NULL;
-
- if (macio_resource_count(mdev) >= 2) {
- if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
- printk(KERN_WARNING "ide-pmac: can't request DMA "
- "resource for %pOF!\n",
- mdev->ofdev.dev.of_node);
- else
- pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
- } else
- pmif->dma_regs = NULL;
-
- dev_set_drvdata(&mdev->ofdev.dev, pmif);
-
- memset(&hw, 0, sizeof(hw));
- pmac_ide_init_ports(&hw, pmif->regbase);
- hw.irq = irq;
- hw.dev = &mdev->bus->pdev->dev;
- hw.parent = &mdev->ofdev.dev;
-
- rc = pmac_ide_setup_device(pmif, &hw);
- if (rc != 0) {
- /* The interface is released to the common IDE layer */
- dev_set_drvdata(&mdev->ofdev.dev, NULL);
- iounmap(base);
- if (pmif->dma_regs) {
- iounmap(pmif->dma_regs);
- macio_release_resource(mdev, 1);
- }
- macio_release_resource(mdev, 0);
- kfree(pmif);
- }
-
- return rc;
-
-out_free_pmif:
- kfree(pmif);
- return rc;
-}
-
-static int
-pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
- int rc = 0;
-
- if (mesg.event != mdev->ofdev.dev.power.power_state.event
- && (mesg.event & PM_EVENT_SLEEP)) {
- rc = pmac_ide_do_suspend(pmif);
- if (rc == 0)
- mdev->ofdev.dev.power.power_state = mesg;
- }
-
- return rc;
-}
-
-static int
-pmac_ide_macio_resume(struct macio_dev *mdev)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
- int rc = 0;
-
- if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
- rc = pmac_ide_do_resume(pmif);
- if (rc == 0)
- mdev->ofdev.dev.power.power_state = PMSG_ON;
- }
-
- return rc;
-}
-
-/*
- * Attach to a PCI probed interface
- */
-static int pmac_ide_pci_attach(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct device_node *np;
- pmac_ide_hwif_t *pmif;
- void __iomem *base;
- unsigned long rbase, rlen;
- int rc;
- struct ide_hw hw;
-
- np = pci_device_to_OF_node(pdev);
- if (np == NULL) {
- printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
- return -ENODEV;
- }
-
- pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
- if (pmif == NULL)
- return -ENOMEM;
-
- if (pci_enable_device(pdev)) {
- printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
- "%pOF\n", np);
- rc = -ENXIO;
- goto out_free_pmif;
- }
- pci_set_master(pdev);
-
- if (pci_request_regions(pdev, "Kauai ATA")) {
- printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
- "%pOF\n", np);
- rc = -ENXIO;
- goto out_free_pmif;
- }
-
- pmif->mdev = NULL;
- pmif->node = np;
-
- rbase = pci_resource_start(pdev, 0);
- rlen = pci_resource_len(pdev, 0);
-
- base = ioremap(rbase, rlen);
- pmif->regbase = (unsigned long) base + 0x2000;
- pmif->dma_regs = base + 0x1000;
- pmif->kauai_fcr = base;
- pmif->irq = pdev->irq;
-
- pci_set_drvdata(pdev, pmif);
-
- memset(&hw, 0, sizeof(hw));
- pmac_ide_init_ports(&hw, pmif->regbase);
- hw.irq = pdev->irq;
- hw.dev = &pdev->dev;
-
- rc = pmac_ide_setup_device(pmif, &hw);
- if (rc != 0) {
- /* The interface is released to the common IDE layer */
- iounmap(base);
- pci_release_regions(pdev);
- kfree(pmif);
- }
-
- return rc;
-
-out_free_pmif:
- kfree(pmif);
- return rc;
-}
-
-static int
-pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
-{
- pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
- int rc = 0;
-
- if (mesg.event != pdev->dev.power.power_state.event
- && (mesg.event & PM_EVENT_SLEEP)) {
- rc = pmac_ide_do_suspend(pmif);
- if (rc == 0)
- pdev->dev.power.power_state = mesg;
- }
-
- return rc;
-}
-
-static int
-pmac_ide_pci_resume(struct pci_dev *pdev)
-{
- pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
- int rc = 0;
-
- if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
- rc = pmac_ide_do_resume(pmif);
- if (rc == 0)
- pdev->dev.power.power_state = PMSG_ON;
- }
-
- return rc;
-}
-
-#ifdef CONFIG_PMAC_MEDIABAY
-static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
-
- switch(mb_state) {
- case MB_CD:
- if (!pmif->hwif->present)
- ide_port_scan(pmif->hwif);
- break;
- default:
- if (pmif->hwif->present)
- ide_port_unregister_devices(pmif->hwif);
- }
-}
-#endif /* CONFIG_PMAC_MEDIABAY */
-
-
-static struct of_device_id pmac_ide_macio_match[] =
-{
- {
- .name = "IDE",
- },
- {
- .name = "ATA",
- },
- {
- .type = "ide",
- },
- {
- .type = "ata",
- },
- {},
-};
-
-static struct macio_driver pmac_ide_macio_driver =
-{
- .driver = {
- .name = "ide-pmac",
- .owner = THIS_MODULE,
- .of_match_table = pmac_ide_macio_match,
- },
- .probe = pmac_ide_macio_attach,
- .suspend = pmac_ide_macio_suspend,
- .resume = pmac_ide_macio_resume,
-#ifdef CONFIG_PMAC_MEDIABAY
- .mediabay_event = pmac_ide_macio_mb_event,
-#endif
-};
-
-static const struct pci_device_id pmac_ide_pci_match[] = {
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
- { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
- {},
-};
-
-static struct pci_driver pmac_ide_pci_driver = {
- .name = "ide-pmac",
- .id_table = pmac_ide_pci_match,
- .probe = pmac_ide_pci_attach,
- .suspend = pmac_ide_pci_suspend,
- .resume = pmac_ide_pci_resume,
-};
-MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
-
-int __init pmac_ide_probe(void)
-{
- int error;
-
- if (!machine_is(powermac))
- return -ENODEV;
-
-#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
- error = pci_register_driver(&pmac_ide_pci_driver);
- if (error)
- goto out;
- error = macio_register_driver(&pmac_ide_macio_driver);
- if (error) {
- pci_unregister_driver(&pmac_ide_pci_driver);
- goto out;
- }
-#else
- error = macio_register_driver(&pmac_ide_macio_driver);
- if (error)
- goto out;
- error = pci_register_driver(&pmac_ide_pci_driver);
- if (error) {
- macio_unregister_driver(&pmac_ide_macio_driver);
- goto out;
- }
-#endif
-out:
- return error;
-}
-
-/*
- * pmac_ide_build_dmatable builds the DBDMA command list
- * for a transfer and sets the DBDMA channel to point to it.
- */
-static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- struct dbdma_cmd *table;
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- struct scatterlist *sg;
- int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
- int i = cmd->sg_nents, count = 0;
-
- /* DMA table is already aligned */
- table = (struct dbdma_cmd *) pmif->dma_table_cpu;
-
- /* Make sure DMA controller is stopped (necessary ?) */
- writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
- while (readl(&dma->status) & RUN)
- udelay(1);
-
- /* Build DBDMA commands list */
- sg = hwif->sg_table;
- while (i && sg_dma_len(sg)) {
- u32 cur_addr;
- u32 cur_len;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
- if (pmif->broken_dma_warn == 0) {
- printk(KERN_WARNING "%s: DMA on non aligned address, "
- "switching to PIO on Ohare chipset\n", drive->name);
- pmif->broken_dma_warn = 1;
- }
- return 0;
- }
- while (cur_len) {
- unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
-
- if (count++ >= MAX_DCMDS) {
- printk(KERN_WARNING "%s: DMA table too small\n",
- drive->name);
- return 0;
- }
- table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE);
- table->req_count = cpu_to_le16(tc);
- table->phy_addr = cpu_to_le32(cur_addr);
- table->cmd_dep = 0;
- table->xfer_status = 0;
- table->res_count = 0;
- cur_addr += tc;
- cur_len -= tc;
- ++table;
- }
- sg = sg_next(sg);
- i--;
- }
-
- /* convert the last command to an input/output last command */
- if (count) {
- table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST);
- /* add the stop command to the end of the list */
- memset(table, 0, sizeof(struct dbdma_cmd));
- table->command = cpu_to_le16(DBDMA_STOP);
- mb();
- writel(hwif->dmatable_dma, &dma->cmdptr);
- return 1;
- }
-
- printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
-
- return 0; /* revert to PIO for this request */
-}
-
-/*
- * Prepare a DMA transfer. We build the DMA table, adjust the timings for
- * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
- */
-static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
- u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
-
- if (pmac_ide_build_dmatable(drive, cmd) == 0)
- return 1;
-
- /* Apple adds 60ns to wrDataSetup on reads */
- if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
- writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL),
- PMAC_IDE_REG(IDE_TIMING_CONFIG));
- (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
- }
-
- return 0;
-}
-
-/*
- * Kick the DMA controller into life after the DMA command has been issued
- * to the drive.
- */
-static void
-pmac_ide_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma;
-
- dma = pmif->dma_regs;
-
- writel((RUN << 16) | RUN, &dma->control);
- /* Make sure it gets to the controller right now */
- (void)readl(&dma->control);
-}
-
-/*
- * After a DMA transfer, make sure the controller is stopped
- */
-static int
-pmac_ide_dma_end (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- u32 dstat;
-
- dstat = readl(&dma->status);
- writel(((RUN|WAKE|DEAD) << 16), &dma->control);
-
- /* verify good DMA status. We don't check for ACTIVE being 0. We should...
- * in theory, but with ATAPI devices doing buffer underruns, that would
- * cause us to disable DMA, which isn't what we want
- */
- return (dstat & (RUN|DEAD)) != RUN;
-}
-
-/*
- * Check that the interrupt we got was for us. We can't always know this
- * for sure with those Apple interfaces (well, we could on the recent ones, but
- * that's not implemented yet). On the other hand, we don't have shared interrupts,
- * so it's not really a problem
- */
-static int
-pmac_ide_dma_test_irq (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- unsigned long status, timeout;
-
- /* We have two things to deal with here:
- *
- * - The dbdma won't stop if the command was started
- * but completed with an error without transferring all
- * the data. This happens when bad blocks are met during
- * a multi-block transfer.
- *
- * - The dbdma fifo hasn't yet finished flushing to
- * system memory when the disk interrupt occurs.
- *
- */
-
- /* If ACTIVE is cleared, the STOP command has passed and
- * the transfer is complete.
- */
- status = readl(&dma->status);
- if (!(status & ACTIVE))
- return 1;
-
- /* If dbdma didn't execute the STOP command yet, the
- * active bit is still set. We consider that we aren't
- * sharing interrupts (which is hopefully the case with
- * those controllers) and so we just try to flush the
- * channel for pending data in the fifo
- */
- udelay(1);
- writel((FLUSH << 16) | FLUSH, &dma->control);
- timeout = 0;
- for (;;) {
- udelay(1);
- status = readl(&dma->status);
- if ((status & FLUSH) == 0)
- break;
- if (++timeout > 100) {
- printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
- hwif->index);
- break;
- }
- }
- return 1;
-}
-
-static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void
-pmac_ide_dma_lost_irq (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
- unsigned long status = readl(&dma->status);
-
- printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
-}
-
-static const struct ide_dma_ops pmac_dma_ops = {
- .dma_host_set = pmac_ide_dma_host_set,
- .dma_setup = pmac_ide_dma_setup,
- .dma_start = pmac_ide_dma_start,
- .dma_end = pmac_ide_dma_end,
- .dma_test_irq = pmac_ide_dma_test_irq,
- .dma_lost_irq = pmac_ide_dma_lost_irq,
-};
-
-/*
- * Allocate the data structures needed for using DMA with an interface
- * and fill the proper list of functions pointers
- */
-static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- /* We won't need pci_dev if we switch to generic consistent
- * DMA routines ...
- */
- if (dev == NULL || pmif->dma_regs == 0)
- return -ENODEV;
- /*
- * Allocate space for the DBDMA commands.
- * The +2 is +1 for the stop command and +1 to allow for
- * aligning the start address to a multiple of 16 bytes.
- */
- pmif->dma_table_cpu = dma_alloc_coherent(&dev->dev,
- (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
- &hwif->dmatable_dma, GFP_KERNEL);
- if (pmif->dma_table_cpu == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA command list\n",
- hwif->name);
- return -ENOMEM;
- }
-
- hwif->sg_max_nents = MAX_DCMDS;
-
- return 0;
-}
-
-module_init(pmac_ide_probe);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
deleted file mode 100644
index ecd0a69245f6..000000000000
--- a/drivers/ide/q40ide.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Q40 I/O port IDE Driver
- *
- * (c) Richard Zidlicky
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- *
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-#include <asm/ide.h>
-
- /*
- * Bases of the IDE interfaces
- */
-
-#define Q40IDE_NUM_HWIFS 2
-
-#define PCIDE_BASE1 0x1f0
-#define PCIDE_BASE2 0x170
-#define PCIDE_BASE3 0x1e8
-#define PCIDE_BASE4 0x168
-#define PCIDE_BASE5 0x1e0
-#define PCIDE_BASE6 0x160
-
-static const unsigned long pcide_bases[Q40IDE_NUM_HWIFS] = {
- PCIDE_BASE1, PCIDE_BASE2, /* PCIDE_BASE3, PCIDE_BASE4 , PCIDE_BASE5,
- PCIDE_BASE6 */
-};
-
-static int q40ide_default_irq(unsigned long base)
-{
- switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- default:
- return 0;
- }
-}
-
-
-/*
- * Addresses are pretranslated for Q40 ISA access.
- */
-static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, int irq)
-{
- memset(hw, 0, sizeof(*hw));
- /* BIG FAT WARNING:
- assumption: only DATA port is ever used in 16 bit mode */
- hw->io_ports.data_addr = Q40_ISA_IO_W(base);
- hw->io_ports.error_addr = Q40_ISA_IO_B(base + 1);
- hw->io_ports.nsect_addr = Q40_ISA_IO_B(base + 2);
- hw->io_ports.lbal_addr = Q40_ISA_IO_B(base + 3);
- hw->io_ports.lbam_addr = Q40_ISA_IO_B(base + 4);
- hw->io_ports.lbah_addr = Q40_ISA_IO_B(base + 5);
- hw->io_ports.device_addr = Q40_ISA_IO_B(base + 6);
- hw->io_ports.status_addr = Q40_ISA_IO_B(base + 7);
- hw->io_ports.ctl_addr = Q40_ISA_IO_B(base + 0x206);
-
- hw->irq = irq;
-}
-
-static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_insw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
- if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) {
- __ide_mm_outsw(data_addr, buf, (len + 1) / 2);
- return;
- }
-
- raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2);
-}
-
-/* Q40 has a byte-swapped IDE interface */
-static const struct ide_tp_ops q40ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = q40ide_input_data,
- .output_data = q40ide_output_data,
-};
-
-static const struct ide_port_info q40ide_port_info = {
- .tp_ops = &q40ide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .irq_flags = IRQF_SHARED,
- .chipset = ide_generic,
-};
-
-/*
- * the static array is needed to have the name reported in /proc/ioports;
- * hwif->name unfortunately isn't available yet
- */
-static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
- "ide0", "ide1"
-};
-
-/*
- * Probe for Q40 IDE interfaces
- */
-
-static int __init q40ide_init(void)
-{
- int i;
- struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };
-
- if (!MACH_IS_Q40)
- return -ENODEV;
-
- printk(KERN_INFO "ide: Q40 IDE controller\n");
-
- for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
- const char *name = q40_ide_names[i];
-
- if (!request_region(pcide_bases[i], 8, name)) {
- printk("could not reserve ports %lx-%lx for %s\n",
- pcide_bases[i],pcide_bases[i]+8,name);
- continue;
- }
- if (!request_region(pcide_bases[i]+0x206, 1, name)) {
- printk("could not reserve port %lx for %s\n",
- pcide_bases[i]+0x206,name);
- release_region(pcide_bases[i], 8);
- continue;
- }
- q40_ide_setup_ports(&hw[i], pcide_bases[i],
- q40ide_default_irq(pcide_bases[i]));
-
- hws[i] = &hw[i];
- }
-
- return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
-}
-
-module_init(q40ide_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
deleted file mode 100644
index ab79b6289464..000000000000
--- a/drivers/ide/qd65xx.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1996-2001 Linus Torvalds & author (see below)
- */
-
-/*
- * Version 0.03 Cleaned auto-tune, added probe
- * Version 0.04 Added second channel tuning
- * Version 0.05 Enhanced tuning ; added qd6500 support
- * Version 0.06 Added dos driver's list
- * Version 0.07 Second channel bug fix
- *
- * QDI QD6500/QD6580 EIDE controller fast support
- *
- * To activate controller support, use "ide0=qd65xx"
- */
-
-/*
- * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
- * Samuel Thibault <samuel.thibault@ens-lyon.org>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <asm/io.h>
-
-#define DRV_NAME "qd65xx"
-
-#include "qd65xx.h"
-
-/*
- * I/O ports are 0x30-0x31 (and 0x32-0x33 for qd6580)
- * or 0xb0-0xb1 (and 0xb2-0xb3 for qd6580)
- * -- qd6500 is a single IDE interface
- * -- qd6580 is a dual IDE interface
- *
- * More research on qd6580 being done by willmore@cig.mot.com (David)
- * More Information given by Petr Soucek (petr@ryston.cz)
- * http://www.ryston.cz/petr/vlb
- */
-
-/*
- * base: Timer1
- *
- *
- * base+0x01: Config (R/O)
- *
- * bit 0: ide baseport: 1 = 0x1f0 ; 0 = 0x170 (only useful for qd6500)
- * bit 1: qd65xx baseport: 1 = 0xb0 ; 0 = 0x30
- * bit 2: ID3: bus speed: 1 = <=33MHz ; 0 = >33MHz
- * bit 3: qd6500: 1 = disabled, 0 = enabled
- * qd6580: 1
- * upper nibble:
- * qd6500: 1100
- * qd6580: either 1010 or 0101
- *
- *
- * base+0x02: Timer2 (qd6580 only)
- *
- *
- * base+0x03: Control (qd6580 only)
- *
- * bits 0-3 must always be set 1
- * bit 4 must be set 1, but is set 0 by dos driver while measuring vlb clock
- * bit 0 : 1 = Only primary port enabled : channel 0 for hda, channel 1 for hdb
- * 0 = Primary and Secondary ports enabled : channel 0 for hda & hdb
- * channel 1 for hdc & hdd
- * bit 1 : 1 = only disks on primary port
- * 0 = disks & ATAPI devices on primary port
- * bit 2-4 : always 0
- * bit 5 : status, but of what ?
- * bit 6 : always set 1 by dos driver
- * bit 7 : set 1 for non-ATAPI devices on primary port
- * (maybe read-ahead and post-write buffer ?)
- */
-
-static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */
-
-/*
- * qd65xx_dev_select:
- *
- * This routine is invoked to prepare for access to a given drive.
- */
-
-static void qd65xx_dev_select(ide_drive_t *drive)
-{
- u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) |
- (QD_TIMREG(drive) & 0x02);
-
- if (timings[index] != QD_TIMING(drive))
- outb(timings[index] = QD_TIMING(drive), QD_TIMREG(drive));
-
- outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
-}
-
-/*
- * qd6500_compute_timing
- *
- * computes the timing value where
- * lower nibble represents active time, in count of VLB clocks
- * upper nibble represents recovery time, in count of VLB clocks
- */
-
-static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
-{
- int clk = ide_vlb_clk ? ide_vlb_clk : 50;
- u8 act_cyc, rec_cyc;
-
- if (clk <= 33) {
- act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
- rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
- } else {
- act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
- rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
- }
-
- return (rec_cyc << 4) | 0x08 | act_cyc;
-}
-
-/*
- * qd6580_compute_timing
- *
- * idem for qd6580
- */
-
-static u8 qd6580_compute_timing (int active_time, int recovery_time)
-{
- int clk = ide_vlb_clk ? ide_vlb_clk : 50;
- u8 act_cyc, rec_cyc;
-
- act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
- rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
-
- return (rec_cyc << 4) | act_cyc;
-}
-
-/*
- * qd_find_disk_type
- *
- * tries to find timing from dos driver's table
- */
-
-static int qd_find_disk_type (ide_drive_t *drive,
- int *active_time, int *recovery_time)
-{
- struct qd65xx_timing_s *p;
- char *m = (char *)&drive->id[ATA_ID_PROD];
- char model[ATA_ID_PROD_LEN];
-
- if (*m == 0)
- return 0;
-
- strncpy(model, m, ATA_ID_PROD_LEN);
- ide_fixstring(model, ATA_ID_PROD_LEN, 1); /* byte-swap */
-
- for (p = qd65xx_timing ; p->offset != -1 ; p++) {
- if (!strncmp(p->model, model+p->offset, 4)) {
- printk(KERN_DEBUG "%s: listed !\n", drive->name);
- *active_time = p->active;
- *recovery_time = p->recovery;
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * qd_set_timing:
- *
- * records the timing
- */
-
-static void qd_set_timing (ide_drive_t *drive, u8 timing)
-{
- unsigned long data = (unsigned long)ide_get_drivedata(drive);
-
- data &= 0xff00;
- data |= timing;
- ide_set_drivedata(drive, (void *)data);
-
- printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
-}
-
-static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- u16 *id = drive->id;
- int active_time = 175;
- int recovery_time = 415; /* worst case values from the dos driver */
-
- /* FIXME: use drive->pio_mode value */
- if (!qd_find_disk_type(drive, &active_time, &recovery_time) &&
- (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) &&
- id[ATA_ID_EIDE_PIO] >= 240) {
- printk(KERN_INFO "%s: PIO mode%d\n", drive->name,
- id[ATA_ID_OLD_PIO_MODES] & 0xff);
- active_time = 110;
- recovery_time = drive->id[ATA_ID_EIDE_PIO] - 120;
- }
-
- qd_set_timing(drive, qd6500_compute_timing(drive->hwif,
- active_time, recovery_time));
-}
-
-static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- unsigned int cycle_time;
- int active_time = 175;
- int recovery_time = 415; /* worst case values from the dos driver */
- u8 base = (hwif->config_data & 0xff00) >> 8;
-
- if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- switch (pio) {
- case 0: break;
- case 3:
- if (cycle_time >= 110) {
- active_time = 86;
- recovery_time = cycle_time - 102;
- } else
- printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
- break;
- case 4:
- if (cycle_time >= 69) {
- active_time = 70;
- recovery_time = cycle_time - 61;
- } else
- printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
- break;
- default:
- if (cycle_time >= 180) {
- active_time = 110;
- recovery_time = cycle_time - 120;
- } else {
- active_time = t->active;
- recovery_time = cycle_time - active_time;
- }
- }
- printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio);
- }
-
- if (!hwif->channel && drive->media != ide_disk) {
- outb(0x5f, QD_CONTROL_PORT);
- printk(KERN_WARNING "%s: ATAPI: disabled read-ahead FIFO "
- "and post-write buffer on %s.\n",
- drive->name, hwif->name);
- }
-
- qd_set_timing(drive, qd6580_compute_timing(active_time, recovery_time));
-}
-
-/*
- * qd_testreg
- *
- * tests if the given port is a register
- */
-
-static int __init qd_testreg(int port)
-{
- unsigned long flags;
- u8 savereg, readreg;
-
- local_irq_save(flags);
- savereg = inb_p(port);
- outb_p(QD_TESTVAL, port); /* safe value */
- readreg = inb_p(port);
- outb(savereg, port);
- local_irq_restore(flags);
-
- if (savereg == QD_TESTVAL) {
- printk(KERN_ERR "Outch ! the probe for qd65xx isn't reliable !\n");
- printk(KERN_ERR "Please contact maintainers to tell about your hardware\n");
- printk(KERN_ERR "Assuming qd65xx is not present.\n");
- return 1;
- }
-
- return (readreg != QD_TESTVAL);
-}
-
-static void __init qd6500_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 base = (hwif->config_data & 0xff00) >> 8;
- u8 config = QD_CONFIG(hwif);
-
- ide_set_drivedata(drive, (void *)QD6500_DEF_DATA);
-}
-
-static void __init qd6580_init_dev(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long t1, t2;
- u8 base = (hwif->config_data & 0xff00) >> 8;
- u8 config = QD_CONFIG(hwif);
-
- if (hwif->host_flags & IDE_HFLAG_SINGLE) {
- t1 = QD6580_DEF_DATA;
- t2 = QD6580_DEF_DATA2;
- } else
- t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA;
-
- ide_set_drivedata(drive, (void *)((drive->dn & 1) ? t2 : t1));
-}
-
-static const struct ide_tp_ops qd65xx_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = qd65xx_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops qd6500_port_ops = {
- .init_dev = qd6500_init_dev,
- .set_pio_mode = qd6500_set_pio_mode,
-};
-
-static const struct ide_port_ops qd6580_port_ops = {
- .init_dev = qd6580_init_dev,
- .set_pio_mode = qd6580_set_pio_mode,
-};
-
-static const struct ide_port_info qd65xx_port_info __initconst = {
- .name = DRV_NAME,
- .tp_ops = &qd65xx_tp_ops,
- .chipset = ide_qd65xx,
- .host_flags = IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-/*
- * qd_probe:
- *
- * looks at the specified baseport and, if a qd is found, registers & initialises it;
- * returns 1 if another qd may be probed
- */
-
-static int __init qd_probe(int base)
-{
- int rc;
- u8 config, unit, control;
- struct ide_port_info d = qd65xx_port_info;
-
- config = inb(QD_CONFIG_PORT);
-
- if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) )
- return -ENODEV;
-
- unit = ! (config & QD_CONFIG_IDE_BASEPORT);
-
- if (unit)
- d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
-
- switch (config & 0xf0) {
- case QD_CONFIG_QD6500:
- if (qd_testreg(base))
- return -ENODEV; /* bad register */
-
- if (config & QD_CONFIG_DISABLED) {
- printk(KERN_WARNING "qd6500 is disabled !\n");
- return -ENODEV;
- }
-
- printk(KERN_NOTICE "qd6500 at %#x\n", base);
- printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
- config, QD_ID3);
-
- d.port_ops = &qd6500_port_ops;
- d.host_flags |= IDE_HFLAG_SINGLE;
- break;
- case QD_CONFIG_QD6580_A:
- case QD_CONFIG_QD6580_B:
- if (qd_testreg(base) || qd_testreg(base + 0x02))
- return -ENODEV; /* bad registers */
-
- control = inb(QD_CONTROL_PORT);
-
- printk(KERN_NOTICE "qd6580 at %#x\n", base);
- printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n",
- config, control, QD_ID3);
-
- outb(QD_DEF_CONTR, QD_CONTROL_PORT);
-
- d.port_ops = &qd6580_port_ops;
- if (control & QD_CONTR_SEC_DISABLED)
- d.host_flags |= IDE_HFLAG_SINGLE;
-
- printk(KERN_INFO "qd6580: %s IDE board\n",
- (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
- break;
- default:
- return -ENODEV;
- }
-
- rc = ide_legacy_device_add(&d, (base << 8) | config);
-
- if (d.host_flags & IDE_HFLAG_SINGLE)
- return (rc == 0) ? 1 : rc;
-
- return rc;
-}
-
-static bool probe_qd65xx;
-
-module_param_named(probe, probe_qd65xx, bool, 0);
-MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
-
-static int __init qd65xx_init(void)
-{
- int rc1, rc2 = -ENODEV;
-
- if (probe_qd65xx == 0)
- return -ENODEV;
-
- rc1 = qd_probe(0x30);
- if (rc1)
- rc2 = qd_probe(0xb0);
-
- if (rc1 < 0 && rc2 < 0)
- return -ENODEV;
-
- return 0;
-}
-
-module_init(qd65xx_init);
-
-MODULE_AUTHOR("Samuel Thibault");
-MODULE_DESCRIPTION("support of qd65xx vlb ide chipset");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/qd65xx.h b/drivers/ide/qd65xx.h
deleted file mode 100644
index 01a43ab45e0e..000000000000
--- a/drivers/ide/qd65xx.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2000 Linus Torvalds & authors
- */
-
-/*
- * Authors: Petr Soucek <petr@ryston.cz>
- * Samuel Thibault <samuel.thibault@ens-lyon.org>
- */
-
-/* truncates a in [b,c] */
-#define IDE_IN(a,b,c) ( ((a)<(b)) ? (b) : ( (a)>(c) ? (c) : (a)) )
-
-#define IDE_IMPLY(a,b) ((!(a)) || (b))
-
-#define QD_TIM1_PORT (base)
-#define QD_CONFIG_PORT (base+0x01)
-#define QD_TIM2_PORT (base+0x02)
-#define QD_CONTROL_PORT (base+0x03)
-
-#define QD_CONFIG_IDE_BASEPORT 0x01
-#define QD_CONFIG_BASEPORT 0x02
-#define QD_CONFIG_ID3 0x04
-#define QD_CONFIG_DISABLED 0x08
-#define QD_CONFIG_QD6500 0xc0
-#define QD_CONFIG_QD6580_A 0xa0
-#define QD_CONFIG_QD6580_B 0x50
-
-#define QD_CONTR_SEC_DISABLED 0x01
-
-#define QD_ID3 ((config & QD_CONFIG_ID3)!=0)
-
-#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff)
-
-static inline u8 QD_TIMING(ide_drive_t *drive)
-{
- return (unsigned long)ide_get_drivedata(drive) & 0x00ff;
-}
-
-static inline u8 QD_TIMREG(ide_drive_t *drive)
-{
- return ((unsigned long)ide_get_drivedata(drive) & 0xff00) >> 8;
-}
-
-#define QD6500_DEF_DATA ((QD_TIM1_PORT<<8) | (QD_ID3 ? 0x0c : 0x08))
-#define QD6580_DEF_DATA ((QD_TIM1_PORT<<8) | (QD_ID3 ? 0x0a : 0x00))
-#define QD6580_DEF_DATA2 ((QD_TIM2_PORT<<8) | (QD_ID3 ? 0x0a : 0x00))
-#define QD_DEF_CONTR (0x40 | ((control & 0x02) ? 0x9f : 0x1f))
-
-#define QD_TESTVAL 0x19 /* safe value */
-
-/* Drive specific timing taken from DOS driver v3.7 */
-
-static struct qd65xx_timing_s {
- s8 offset; /* offset from the beginning of the Model Number */
- char model[4]; /* 4 chars from Model number, no conversion */
- s16 active; /* active time */
- s16 recovery; /* recovery time */
-} qd65xx_timing [] = {
- { 30, "2040", 110, 225 }, /* Conner CP30204 */
- { 30, "2045", 135, 225 }, /* Conner CP30254 */
- { 30, "1040", 155, 325 }, /* Conner CP30104 */
- { 30, "1047", 135, 265 }, /* Conner CP30174 */
- { 30, "5344", 135, 225 }, /* Conner CP3544 */
- { 30, "01 4", 175, 405 }, /* Conner CP-3104 */
- { 27, "C030", 175, 375 }, /* Conner CP3000 */
- { 8, "PL42", 110, 295 }, /* Quantum LP240 */
- { 8, "PL21", 110, 315 }, /* Quantum LP120 */
- { 8, "PL25", 175, 385 }, /* Quantum LP52 */
- { 4, "PA24", 110, 285 }, /* WD Piranha SP4200 */
- { 6, "2200", 110, 260 }, /* WD Caviar AC2200 */
- { 6, "3204", 110, 235 }, /* WD Caviar AC2340 */
- { 6, "1202", 110, 265 }, /* WD Caviar AC2120 */
- { 0, "DS3-", 135, 315 }, /* Teac SD340 */
- { 8, "KM32", 175, 355 }, /* Toshiba MK234 */
- { 2, "53A1", 175, 355 }, /* Seagate ST351A */
- { 2, "4108", 175, 295 }, /* Seagate ST1480A */
- { 2, "1344", 175, 335 }, /* Seagate ST3144A */
- { 6, "7 12", 110, 225 }, /* Maxtor 7213A */
- { 30, "02F4", 145, 295 }, /* Conner 3204F */
- { 2, "1302", 175, 335 }, /* Seagate ST3120A */
- { 2, "2334", 145, 265 }, /* Seagate ST3243A */
- { 2, "2338", 145, 275 }, /* Seagate ST3283A */
- { 2, "3309", 145, 275 }, /* Seagate ST3390A */
- { 2, "5305", 145, 275 }, /* Seagate ST3550A */
- { 2, "4100", 175, 295 }, /* Seagate ST1400A */
- { 2, "4110", 175, 295 }, /* Seagate ST1401A */
- { 2, "6300", 135, 265 }, /* Seagate ST3600A */
- { 2, "5300", 135, 265 }, /* Seagate ST3500A */
- { 6, "7 31", 135, 225 }, /* Maxtor 7131 AT */
- { 6, "7 43", 115, 265 }, /* Maxtor 7345 AT */
- { 6, "7 42", 110, 255 }, /* Maxtor 7245 AT */
- { 6, "3 04", 135, 265 }, /* Maxtor 340 AT */
- { 6, "61 0", 135, 285 }, /* WD AC160 */
- { 6, "1107", 135, 235 }, /* WD AC1170 */
- { 6, "2101", 110, 220 }, /* WD AC1210 */
- { 6, "4202", 135, 245 }, /* WD AC2420 */
- { 6, "41 0", 175, 355 }, /* WD Caviar 140 */
- { 6, "82 0", 175, 355 }, /* WD Caviar 280 */
- { 8, "PL01", 175, 375 }, /* Quantum LP105 */
- { 8, "PL25", 110, 295 }, /* Quantum LP525 */
- { 10, "4S 2", 175, 385 }, /* Quantum ELS42 */
- { 10, "8S 5", 175, 385 }, /* Quantum ELS85 */
- { 10, "1S72", 175, 385 }, /* Quantum ELS127 */
- { 10, "1S07", 175, 385 }, /* Quantum ELS170 */
- { 8, "ZE42", 135, 295 }, /* Quantum EZ240 */
- { 8, "ZE21", 175, 385 }, /* Quantum EZ127 */
- { 8, "ZE58", 175, 385 }, /* Quantum EZ85 */
- { 8, "ZE24", 175, 385 }, /* Quantum EZ42 */
- { 27, "C036", 155, 325 }, /* Conner CP30064 */
- { 27, "C038", 155, 325 }, /* Conner CP30084 */
- { 6, "2205", 110, 255 }, /* WDC AC2250 */
- { 2, " CHA", 140, 415 }, /* WDC AH series; WDC AH260, WDC */
- { 2, " CLA", 140, 415 }, /* WDC AL series: WDC AL2120, 2170, */
- { 4, "UC41", 140, 415 }, /* WDC CU140 */
- { 6, "1207", 130, 275 }, /* WDC AC2170 */
- { 6, "2107", 130, 275 }, /* WDC AC1270 */
- { 6, "5204", 130, 275 }, /* WDC AC2540 */
- { 30, "3004", 110, 235 }, /* Conner CP30340 */
- { 30, "0345", 135, 255 }, /* Conner CP30544 */
- { 12, "12A3", 175, 320 }, /* MAXTOR LXT-213A */
- { 12, "43A0", 145, 240 }, /* MAXTOR LXT-340A */
- { 6, "7 21", 180, 290 }, /* Maxtor 7120 AT */
- { 6, "7 71", 135, 240 }, /* Maxtor 7170 AT */
- { 12, "45\0000", 110, 205 }, /* MAXTOR MXT-540 */
- { 8, "PL11", 180, 290 }, /* QUANTUM LP110A */
- { 8, "OG21", 150, 275 }, /* QUANTUM GO120 */
- { 12, "42A5", 175, 320 }, /* MAXTOR LXT-245A */
- { 2, "2309", 175, 295 }, /* ST3290A */
- { 2, "3358", 180, 310 }, /* ST3385A */
- { 2, "6355", 180, 310 }, /* ST3655A */
- { 2, "1900", 175, 270 }, /* ST9100A */
- { 2, "1954", 175, 270 }, /* ST9145A */
- { 2, "1909", 175, 270 }, /* ST9190AG */
- { 2, "2953", 175, 270 }, /* ST9235A */
- { 2, "1359", 175, 270 }, /* ST3195A */
- { 24, "3R11", 175, 290 }, /* ALPS ELECTRIC Co.,LTD, DR311C */
- { 0, "2M26", 175, 215 }, /* M262XT-0Ah */
- { 4, "2253", 175, 300 }, /* HP C2235A */
- { 4, "-32A", 145, 245 }, /* H3133-A2 */
- { 30, "0326", 150, 270 }, /* Samsung Electronics 120MB */
- { 30, "3044", 110, 195 }, /* Conner CFA340A */
- { 30, "43A0", 110, 195 }, /* Conner CFA340A */
- { -1, " ", 175, 415 } /* unknown disk name */
-};
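
Each entry above pairs four characters of the ATA model string, taken at the per-vendor offset in the first field, with PIO active/recovery times in nanoseconds; the routine that walks the table is not part of this hunk. A minimal sketch of such a scan (the qd_guess_timing() name is hypothetical, and byte-order handling of the ID string is ignored):

	/* Hypothetical lookup over qd65xx_timing[]; the last entry
	 * (offset == -1) acts as the catch-all default. */
	static void qd_guess_timing(const char *model, s16 *active, s16 *recovery)
	{
		struct qd65xx_timing_s *p;

		for (p = qd65xx_timing; p->offset != -1; p++)
			if (!strncmp(model + p->offset, p->model, 4))
				break;

		*active = p->active;		/* PIO active time, ns */
		*recovery = p->recovery;	/* PIO recovery time, ns */
	}
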
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
deleted file mode 100644
index 0ab8b86b7ed7..000000000000
--- a/drivers/ide/rapide.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 1996-2002 Russell King.
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/errno.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/ecard.h>
-
-static const struct ide_port_info rapide_port_info = {
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .chipset = ide_generic,
-};
-
-static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
- void __iomem *ctrl, unsigned int sz, int irq)
-{
- unsigned long port = (unsigned long)base;
- int i;
-
- for (i = 0; i <= 7; i++) {
- hw->io_ports_array[i] = port;
- port += sz;
- }
- hw->io_ports.ctl_addr = (unsigned long)ctrl;
- hw->irq = irq;
-}
-
-static int rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
-{
- void __iomem *base;
- struct ide_host *host;
- int ret;
- struct ide_hw hw, *hws[] = { &hw };
-
- ret = ecard_request_resources(ec);
- if (ret)
- goto out;
-
- base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
- if (!base) {
- ret = -ENOMEM;
- goto release;
- }
-
- memset(&hw, 0, sizeof(hw));
- rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
- hw.dev = &ec->dev;
-
- ret = ide_host_add(&rapide_port_info, hws, 1, &host);
- if (ret)
- goto release;
-
- ecard_set_drvdata(ec, host);
- goto out;
-
- release:
- ecard_release_resources(ec);
- out:
- return ret;
-}
-
-static void rapide_remove(struct expansion_card *ec)
-{
- struct ide_host *host = ecard_get_drvdata(ec);
-
- ecard_set_drvdata(ec, NULL);
-
- ide_host_remove(host);
-
- ecard_release_resources(ec);
-}
-
-static struct ecard_id rapide_ids[] = {
- { MANU_YELLOWSTONE, PROD_YELLOWSTONE_RAPIDE32 },
- { 0xffff, 0xffff }
-};
-
-static struct ecard_driver rapide_driver = {
- .probe = rapide_probe,
- .remove = rapide_remove,
- .id_table = rapide_ids,
- .drv = {
- .name = "rapide",
- },
-};
-
-static int __init rapide_init(void)
-{
- return ecard_register_driver(&rapide_driver);
-}
-
-static void __exit rapide_exit(void)
-{
- ecard_remove_driver(&rapide_driver);
-}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Yellowstone RAPIDE driver");
-
-module_init(rapide_init);
-module_exit(rapide_exit);
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
deleted file mode 100644
index fce2b7de5a19..000000000000
--- a/drivers/ide/rz1000.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-1998 Linus Torvalds & author (see below)
- */
-
-/*
- * Principal Author: mlord@pobox.com (Mark Lord)
- *
- * See linux/MAINTAINERS for address of current maintainer.
- *
- * This file provides support for disabling the buggy read-ahead
- * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
- *
- * Dunno if this fixes both ports, or only the primary port (?).
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "rz1000"
-
-static int rz1000_disable_readahead(struct pci_dev *dev)
-{
- u16 reg;
-
- if (!pci_read_config_word (dev, 0x40, &reg) &&
- !pci_write_config_word(dev, 0x40, reg & 0xdfff)) {
- printk(KERN_INFO "%s: disabled chipset read-ahead "
- "(buggy RZ1000/RZ1001)\n", pci_name(dev));
- return 0;
- } else {
- printk(KERN_INFO "%s: serialized, disabled unmasking "
- "(buggy RZ1000/RZ1001)\n", pci_name(dev));
- return 1;
- }
-}
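
For a concrete reading of the write above (worked out here, not stated in the hunk): 0xdfff is ~0x2000, so the driver clears bit 13 of PCI config word 0x40 to switch off the chip's read-ahead; if either config access fails, the caller falls back to serializing the interface and disabling IRQ unmasking instead.
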
-
-static const struct ide_port_info rz1000_chipset = {
- .name = DRV_NAME,
- .host_flags = IDE_HFLAG_NO_DMA,
-};
-
-static int rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = rz1000_chipset;
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- if (rz1000_disable_readahead(dev)) {
- d.host_flags |= IDE_HFLAG_SERIALIZE;
- d.host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static void rz1000_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id rz1000_pci_tbl[] = {
- { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), 0 },
- { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, rz1000_pci_tbl);
-
-static struct pci_driver rz1000_pci_driver = {
- .name = "RZ1000_IDE",
- .id_table = rz1000_pci_tbl,
- .probe = rz1000_init_one,
- .remove = rz1000_remove,
-};
-
-static int __init rz1000_ide_init(void)
-{
- return ide_pci_register_driver(&rz1000_pci_driver);
-}
-
-static void __exit rz1000_ide_exit(void)
-{
- pci_unregister_driver(&rz1000_pci_driver);
-}
-
-module_init(rz1000_ide_init);
-module_exit(rz1000_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for RZ1000 IDE");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
deleted file mode 100644
index a5b701818405..000000000000
--- a/drivers/ide/sc1200.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Development of this chipset driver was funded
- * by the nice folks at National Semiconductor.
- *
- * Documentation:
- * Available from National Semiconductor
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/pm.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "sc1200"
-
-#define SC1200_REV_A 0x00
-#define SC1200_REV_B1 0x01
-#define SC1200_REV_B3 0x02
-#define SC1200_REV_C1 0x03
-#define SC1200_REV_D1 0x04
-
-#define PCI_CLK_33 0x00
-#define PCI_CLK_48 0x01
-#define PCI_CLK_66 0x02
-#define PCI_CLK_33A 0x03
-
-static unsigned short sc1200_get_pci_clock (void)
-{
- unsigned char chip_id, silicon_revision;
- unsigned int pci_clock;
- /*
- * Check the silicon revision, as not all versions of the chip
- * have the register with the fast PCI bus timings.
- */
- chip_id = inb (0x903c);
- silicon_revision = inb (0x903d);
-
- // Read the fast pci clock frequency
- if (chip_id == 0x04 && silicon_revision < SC1200_REV_B1) {
- pci_clock = PCI_CLK_33;
- } else {
- // check clock generator configuration (cfcc)
- // the clock is in bits 8 and 9 of this word
-
- pci_clock = inw (0x901e);
- pci_clock >>= 8;
- pci_clock &= 0x03;
- if (pci_clock == PCI_CLK_33A)
- pci_clock = PCI_CLK_33;
- }
- return pci_clock;
-}
-
-/*
- * Here are the standard PIO mode 0-4 timings for each "format".
- * Format-0 uses fast data reg timings, with slower command reg timings.
- * Format-1 uses fast timings for all registers, but won't work with all drives.
- */
-static const unsigned int sc1200_pio_timings[4][5] =
- {{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0, 33 MHz
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33 MHz
- {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48 MHz
- {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131}}; // format1, 66 MHz
-
-/*
- * After chip reset, the PIO timings are set to 0x00009172, which is not valid.
- */
-//#define SC1200_BAD_PIO(timings) (((timings)&~0x80000000)==0x00009172)
-
-static void sc1200_tunepio(ide_drive_t *drive, u8 pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- unsigned int basereg = hwif->channel ? 0x50 : 0x40, format = 0;
-
- pci_read_config_dword(pdev, basereg + 4, &format);
- format = (format >> 31) & 1;
- if (format)
- format += sc1200_get_pci_clock();
- pci_write_config_dword(pdev, basereg + ((drive->dn & 1) << 3),
- sc1200_pio_timings[format][pio]);
-}
-
-/*
- * The SC1200 specifies that two drives sharing a cable cannot mix
- * UDMA/MDMA. It has to be one or the other, for the pair, though
- * different timings can still be chosen for each drive. We could
- * set the appropriate timing bits on the fly, but that might be
- * a bit confusing. So, for now we statically handle this requirement
- * by looking at our mate drive to see what it is capable of, before
- * choosing a mode for our own drive.
- */
-static u8 sc1200_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_drive_t *mate = ide_get_pair_dev(drive);
- u16 *mateid;
- u8 mask = hwif->ultra_mask;
-
- if (mate == NULL)
- goto out;
- mateid = mate->id;
-
- if (ata_id_has_dma(mateid) && __ide_dma_bad_drive(mate) == 0) {
- if ((mateid[ATA_ID_FIELD_VALID] & 4) &&
- (mateid[ATA_ID_UDMA_MODES] & 7))
- goto out;
- if (mateid[ATA_ID_MWDMA_MODES] & 7)
- mask = 0;
- }
-out:
- return mask;
-}
-
-static void sc1200_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int reg, timings;
- unsigned short pci_clock;
- unsigned int basereg = hwif->channel ? 0x50 : 0x40;
- const u8 mode = drive->dma_mode;
-
- static const u32 udma_timing[3][3] = {
- { 0x00921250, 0x00911140, 0x00911030 },
- { 0x00932470, 0x00922260, 0x00922140 },
- { 0x009436a1, 0x00933481, 0x00923261 },
- };
-
- static const u32 mwdma_timing[3][3] = {
- { 0x00077771, 0x00012121, 0x00002020 },
- { 0x000bbbb2, 0x00024241, 0x00013131 },
- { 0x000ffff3, 0x00035352, 0x00015151 },
- };
-
- pci_clock = sc1200_get_pci_clock();
-
- /*
- * Note that each DMA mode has several timings associated with it.
- * The correct timing depends on the fast PCI clock freq.
- */
-
- if (mode >= XFER_UDMA_0)
- timings = udma_timing[pci_clock][mode - XFER_UDMA_0];
- else
- timings = mwdma_timing[pci_clock][mode - XFER_MW_DMA_0];
-
- if ((drive->dn & 1) == 0) {
- pci_read_config_dword(dev, basereg + 4, &reg);
- timings |= reg & 0x80000000; /* preserve PIO format bit */
- pci_write_config_dword(dev, basereg + 4, timings);
- } else
- pci_write_config_dword(dev, basereg + 12, timings);
-}
-
-/* Replacement for the standard ide_dma_end action in
- * dma_proc.
- *
- * returns 1 on error, 0 otherwise
- */
-static int sc1200_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long dma_base = hwif->dma_base;
- u8 dma_stat;
-
- dma_stat = inb(dma_base+2); /* get DMA status */
-
- if (!(dma_stat & 4))
- printk(" ide_dma_end dma_stat=%0x err=%x newerr=%x\n",
- dma_stat, ((dma_stat&7)!=4), ((dma_stat&2)==2));
-
- outb(dma_stat|0x1b, dma_base+2); /* clear the INTR & ERROR bits */
- outb(inb(dma_base)&~1, dma_base); /* !! DO THIS HERE !! stop DMA */
-
- return (dma_stat & 7) != 4; /* verify good DMA status */
-}
-
-/*
- * sc1200_set_pio_mode() handles setting of PIO modes
- * for both the chipset and drive.
- *
- * All existing BIOSs for this chipset guarantee that all drives
- * will have valid default PIO timings set up before we get here.
- */
-
-static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int mode = -1;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /*
- * bad abuse of ->set_pio_mode interface
- */
- switch (pio) {
- case 200: mode = XFER_UDMA_0; break;
- case 201: mode = XFER_UDMA_1; break;
- case 202: mode = XFER_UDMA_2; break;
- case 100: mode = XFER_MW_DMA_0; break;
- case 101: mode = XFER_MW_DMA_1; break;
- case 102: mode = XFER_MW_DMA_2; break;
- }
- if (mode != -1) {
- printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
- ide_dma_off_quietly(drive);
- if (ide_set_dma_mode(drive, mode) == 0 &&
- (drive->dev_flags & IDE_DFLAG_USING_DMA))
- hwif->dma_ops->dma_host_set(drive, 1);
- return;
- }
-
- sc1200_tunepio(drive, pio);
-}
-
-#ifdef CONFIG_PM
-struct sc1200_saved_state {
- u32 regs[8];
-};
-
-static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
-{
- printk("SC1200: suspend(%u)\n", state.event);
-
- /*
- * we only save state when going from full power to less
- */
- if (state.event == PM_EVENT_ON) {
- struct ide_host *host = pci_get_drvdata(dev);
- struct sc1200_saved_state *ss = host->host_priv;
- unsigned int r;
-
- /*
- * save timing registers
- * (this may be unnecessary if BIOS also does it)
- */
- for (r = 0; r < 8; r++)
- pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]);
- }
-
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
- return 0;
-}
-
-static int sc1200_resume (struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct sc1200_saved_state *ss = host->host_priv;
- unsigned int r;
- int i;
-
- i = pci_enable_device(dev);
- if (i)
- return i;
-
- /*
- * restore timing registers
- * (this may be unnecessary if BIOS also does it)
- */
- for (r = 0; r < 8; r++)
- pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]);
-
- return 0;
-}
-#endif
-
-static const struct ide_port_ops sc1200_port_ops = {
- .set_pio_mode = sc1200_set_pio_mode,
- .set_dma_mode = sc1200_set_dma_mode,
- .udma_filter = sc1200_udma_filter,
-};
-
-static const struct ide_dma_ops sc1200_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = sc1200_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info sc1200_chipset = {
- .name = DRV_NAME,
- .port_ops = &sc1200_port_ops,
- .dma_ops = &sc1200_dma_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_ABUSE_DMA_MODES,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA2,
-};
-
-static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct sc1200_saved_state *ss = NULL;
- int rc;
-
-#ifdef CONFIG_PM
- ss = kmalloc(sizeof(*ss), GFP_KERNEL);
- if (ss == NULL)
- return -ENOMEM;
-#endif
- rc = ide_pci_init_one(dev, &sc1200_chipset, ss);
- if (rc)
- kfree(ss);
-
- return rc;
-}
-
-static const struct pci_device_id sc1200_pci_tbl[] = {
- { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), 0},
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, sc1200_pci_tbl);
-
-static struct pci_driver sc1200_pci_driver = {
- .name = "SC1200_IDE",
- .id_table = sc1200_pci_tbl,
- .probe = sc1200_init_one,
- .remove = ide_pci_remove,
-#ifdef CONFIG_PM
- .suspend = sc1200_suspend,
- .resume = sc1200_resume,
-#endif
-};
-
-static int __init sc1200_ide_init(void)
-{
- return ide_pci_register_driver(&sc1200_pci_driver);
-}
-
-static void __exit sc1200_ide_exit(void)
-{
- pci_unregister_driver(&sc1200_pci_driver);
-}
-
-module_init(sc1200_ide_init);
-module_exit(sc1200_ide_exit);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for NS SC1200 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
deleted file mode 100644
index 458e72e034b0..000000000000
--- a/drivers/ide/serverworks.c
+++ /dev/null
@@ -1,456 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1998-2000 Michel Aubry
- * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
- * Portions copyright (c) 2001 Sun Microsystems
- *
- *
- * RCC/ServerWorks IDE driver for Linux
- *
- * OSB4: `Open South Bridge' IDE Interface (fn 1)
- * supports UDMA mode 2 (33 MB/s)
- *
- * CSB5: `Champion South Bridge' IDE Interface (fn 1)
- * all revisions support UDMA mode 4 (66 MB/s)
- * revision A2.0 and up support UDMA mode 5 (100 MB/s)
- *
- * *** The CSB5 does not provide ANY register ***
- * *** to detect 80-conductor cable presence. ***
- *
- * CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
- *
- * HT1000: AKA BCM5785 - Hypertransport Southbridge for Opteron systems. IDE
- * controller same as the CSB6. Single channel ATA100 only.
- *
- * Documentation:
- * Available under NDA only. Errata info very hard to get.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "serverworks"
-
-#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
-#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
-
-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
- * can overrun their FIFOs when used with the CSB5 */
-static const char *svwks_bad_ata100[] = {
- "ST320011A",
- "ST340016A",
- "ST360021A",
- "ST380021A",
- NULL
-};
-
-static int check_in_drive_lists (ide_drive_t *drive, const char **list)
-{
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- while (*list)
- if (!strcmp(*list++, m))
- return 1;
- return 0;
-}
-
-static u8 svwks_udma_filter(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
-
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
- return 0x1f;
- } else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
- return 0x07;
- } else {
- u8 btr = 0, mode, mask;
-
- pci_read_config_byte(dev, 0x5A, &btr);
- mode = btr & 0x3;
-
- /* If someone decides to do UDMA133 on CSB5 the same
- issue will bite so be inclusive */
- if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
- mode = 2;
-
- switch(mode) {
- case 3: mask = 0x3f; break;
- case 2: mask = 0x1f; break;
- case 1: mask = 0x07; break;
- default: mask = 0x00; break;
- }
-
- return mask;
- }
-}
-
-static u8 svwks_csb_check (struct pci_dev *dev)
-{
- switch (dev->device) {
- case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
- case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
- return 1;
- default:
- break;
- }
- return 0;
-}
-
-static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
- static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- if (drive->dn >= ARRAY_SIZE(drive_pci))
- return;
-
- pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
-
- if (svwks_csb_check(dev)) {
- u16 csb_pio = 0;
-
- pci_read_config_word(dev, 0x4a, &csb_pio);
-
- csb_pio &= ~(0x0f << (4 * drive->dn));
- csb_pio |= (pio << (4 * drive->dn));
-
- pci_write_config_word(dev, 0x4a, csb_pio);
- }
-}
-
-static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
- static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
- static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- const u8 speed = drive->dma_mode;
- u8 unit = drive->dn & 1;
-
- u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
-
- if (drive->dn >= ARRAY_SIZE(drive_pci2))
- return;
-
- pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
- pci_read_config_byte(dev, 0x54, &ultra_enable);
-
- ultra_timing &= ~(0x0F << (4*unit));
- ultra_enable &= ~(0x01 << drive->dn);
-
- if (speed >= XFER_UDMA_0) {
- dma_timing |= dma_modes[2];
- ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit));
- ultra_enable |= (0x01 << drive->dn);
- } else if (speed >= XFER_MW_DMA_0)
- dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
-
- pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
- pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
- pci_write_config_byte(dev, 0x54, ultra_enable);
-}
-
-static int init_chipset_svwks(struct pci_dev *dev)
-{
- unsigned int reg;
- u8 btr;
-
- /* force Master Latency Timer value to 64 PCICLKs */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);
-
- /* OSB4 : South Bridge and IDE */
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
- struct pci_dev *isa_dev =
- pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
- if (isa_dev) {
- pci_read_config_dword(isa_dev, 0x64, &reg);
- reg &= ~0x00002000; /* disable 600ns interrupt mask */
- if(!(reg & 0x00004000))
- printk(KERN_DEBUG DRV_NAME " %s: UDMA not BIOS "
- "enabled.\n", pci_name(dev));
- reg |= 0x00004000; /* enable UDMA/33 support */
- pci_write_config_dword(isa_dev, 0x64, reg);
- pci_dev_put(isa_dev);
- }
- }
-
- /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
- else if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
-
- /* Third Channel Test */
- if (!(PCI_FUNC(dev->devfn) & 1)) {
- struct pci_dev * findev = NULL;
- u32 reg4c = 0;
- findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
- if (findev) {
- pci_read_config_dword(findev, 0x4C, &reg4c);
- reg4c &= ~0x000007FF;
- reg4c |= 0x00000040;
- reg4c |= 0x00000020;
- pci_write_config_dword(findev, 0x4C, reg4c);
- pci_dev_put(findev);
- }
- outb_p(0x06, 0x0c00);
- dev->irq = inb_p(0x0c01);
- } else {
- struct pci_dev * findev = NULL;
- u8 reg41 = 0;
-
- findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
- if (findev) {
- pci_read_config_byte(findev, 0x41, &reg41);
- reg41 &= ~0x40;
- pci_write_config_byte(findev, 0x41, reg41);
- pci_dev_put(findev);
- }
- /*
- * This is a device pin issue on CSB6.
- * Since there will be a future raid mode,
- * early versions of the chipset require the
- * interrupt pin to be set, and it is a compatibility
- * mode issue.
- */
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
- dev->irq = 0;
- }
-// pci_read_config_dword(dev, 0x40, &pioreg)
-// pci_write_config_dword(dev, 0x40, 0x99999999);
-// pci_read_config_dword(dev, 0x44, &dmareg);
-// pci_write_config_dword(dev, 0x44, 0xFFFFFFFF);
- /* setup the UDMA Control register
- *
- * 1. clear bit 6 to enable DMA
- * 2. enable DMA modes with bits 0-1
- * 00 : legacy
- * 01 : udma2
- * 10 : udma2/udma4
- * 11 : udma2/udma4/udma5
- */
- pci_read_config_byte(dev, 0x5A, &btr);
- btr &= ~0x40;
- if (!(PCI_FUNC(dev->devfn) & 1))
- btr |= 0x2;
- else
- btr |= (dev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
- pci_write_config_byte(dev, 0x5A, btr);
- }
- /* Setup HT1000 SouthBridge Controller - Single Channel Only */
- else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
- pci_read_config_byte(dev, 0x5A, &btr);
- btr &= ~0x40;
- btr |= 0x3;
- pci_write_config_byte(dev, 0x5A, btr);
- }
-
- return 0;
-}
-
-static u8 ata66_svwks_svwks(ide_hwif_t *hwif)
-{
- return ATA_CBL_PATA80;
-}
-
-/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
- * of the subsystem device ID indicate presence of an 80-pin cable.
- * Bit 15 clear = secondary IDE channel does not have 80-pin cable.
- * Bit 15 set = secondary IDE channel has 80-pin cable.
- * Bit 14 clear = primary IDE channel does not have 80-pin cable.
- * Bit 14 set = primary IDE channel has 80-pin cable.
- */
-static u8 ata66_svwks_dell(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
- dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
- return ((1 << (hwif->channel + 14)) &
- dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- return ATA_CBL_PATA40;
-}
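
Worked out from the mask above (not stated explicitly in the hunk): for the primary channel the test uses 1 << 14 == 0x4000 and for the secondary channel 1 << 15 == 0x8000, so a Dell subsystem device ID with both top bits set (e.g. 0xC0xx) reports an 80-conductor cable on both channels.
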
-
-/* Sun Cobalt Alpine hardware avoids the 80-pin cable
- * detect issue by attaching the drives directly to the board.
- * This check follows the Dell precedent (how scary is that?!)
- *
- * WARNING: this only works on Alpine hardware!
- */
-static u8 ata66_svwks_cobalt(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
- dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
- dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
- return ((1 << (hwif->channel + 14)) &
- dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- return ATA_CBL_PATA40;
-}
-
-static u8 svwks_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- /* Server Works */
- if (dev->subsystem_vendor == PCI_VENDOR_ID_SERVERWORKS)
- return ata66_svwks_svwks (hwif);
-
- /* Dell PowerEdge */
- if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL)
- return ata66_svwks_dell (hwif);
-
- /* Cobalt Alpine */
- if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN)
- return ata66_svwks_cobalt (hwif);
-
- /* Per Specified Design by OEM, and ASIC Architect */
- if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
- return ATA_CBL_PATA80;
-
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops osb4_port_ops = {
- .set_pio_mode = svwks_set_pio_mode,
- .set_dma_mode = svwks_set_dma_mode,
-};
-
-static const struct ide_port_ops svwks_port_ops = {
- .set_pio_mode = svwks_set_pio_mode,
- .set_dma_mode = svwks_set_dma_mode,
- .udma_filter = svwks_udma_filter,
- .cable_detect = svwks_cable_detect,
-};
-
-static const struct ide_port_info serverworks_chipsets[] = {
- { /* 0: OSB4 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &osb4_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = 0x00, /* UDMA is problematic on OSB4 */
- },
- { /* 1: CSB5 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 2: CSB6 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 3: CSB6-2 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- },
- { /* 4: HT1000 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
- .port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAG_SINGLE,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- }
-};
-
-/**
- * svwks_init_one - called when an OSB/CSB is found
- * @dev: the svwks device
- * @id: the matching pci id
- *
- * Called when the PCI registration layer (or the IDE initialization)
- * finds a device matching our IDE device tables.
- */
-
-static int svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d;
- u8 idx = id->driver_data;
-
- d = serverworks_chipsets[idx];
-
- if (idx == 1)
- d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
- else if (idx == 2 || idx == 3) {
- if ((PCI_FUNC(dev->devfn) & 1) == 0) {
- if (pci_resource_start(dev, 0) != 0x01f1)
- d.host_flags |= IDE_HFLAG_NON_BOOTABLE;
- d.host_flags |= IDE_HFLAG_SINGLE;
- } else
- d.host_flags &= ~IDE_HFLAG_SINGLE;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id svwks_pci_tbl[] = {
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 1 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 3 },
- { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 4 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
-
-static struct pci_driver svwks_pci_driver = {
- .name = "Serverworks_IDE",
- .id_table = svwks_pci_tbl,
- .probe = svwks_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init svwks_ide_init(void)
-{
- return ide_pci_register_driver(&svwks_pci_driver);
-}
-
-static void __exit svwks_ide_exit(void)
-{
- pci_unregister_driver(&svwks_pci_driver);
-}
-
-module_init(svwks_ide_init);
-module_exit(svwks_ide_exit);
-
-MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
deleted file mode 100644
index fdc8e813170c..000000000000
--- a/drivers/ide/setup-pci.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 1995-1998 Mark Lord
- * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/io.h>
-
-/**
- * ide_setup_pci_baseregs - place a PCI IDE controller into native mode
- * @dev: PCI device of interface to switch native
- * @name: Name of interface
- *
- * We attempt to place the PCI interface into PCI native mode. If
- * we succeed, the BARs are OK and the controller is in PCI mode.
- * Returns 0 on success or an errno code.
- *
- * FIXME: if we program the interface and then fail to set the BARs
- * we don't switch it back to legacy mode. Do we actually care ??
- */
-
-static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
-{
- u8 progif = 0;
-
- /*
- * Place both IDE interfaces into PCI "native" mode:
- */
- if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
- (progif & 5) != 5) {
- if ((progif & 0xa) != 0xa) {
- printk(KERN_INFO "%s %s: device not capable of full "
- "native PCI mode\n", name, pci_name(dev));
- return -EOPNOTSUPP;
- }
- printk(KERN_INFO "%s %s: placing both ports into native PCI "
- "mode\n", name, pci_name(dev));
- (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
- if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
- (progif & 5) != 5) {
- printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
- "wanted 0x%04x, got 0x%04x\n",
- name, pci_name(dev), progif | 5, progif);
- return -EOPNOTSUPP;
- }
- }
- return 0;
-}
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
-{
- u8 dma_stat = inb(dma_base + 2);
-
- outb(dma_stat & 0x60, dma_base + 2);
- dma_stat = inb(dma_base + 2);
-
- return (dma_stat & 0x80) ? 1 : 0;
-}
-
-/**
- * ide_pci_dma_base - setup BMIBA
- * @hwif: IDE interface
- * @d: IDE port info
- *
- * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
- */
-
-unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long dma_base = 0;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return hwif->dma_base;
-
- if (hwif->mate && hwif->mate->dma_base) {
- dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
- } else {
- u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;
-
- dma_base = pci_resource_start(dev, baridx);
-
- if (dma_base == 0) {
- printk(KERN_ERR "%s %s: DMA base is invalid\n",
- d->name, pci_name(dev));
- return 0;
- }
- }
-
- if (hwif->channel)
- dma_base += 8;
-
- return dma_base;
-}
-EXPORT_SYMBOL_GPL(ide_pci_dma_base);
-
-int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 dma_stat;
-
- if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
- goto out;
-
- if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
- if (ide_pci_clear_simplex(hwif->dma_base, d->name))
- printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
- d->name, pci_name(dev));
- goto out;
- }
-
- /*
- * If the device claims "simplex" DMA, this means that only one of
- * the two interfaces can be trusted with DMA at any point in time
- * (so we should enable DMA only on one of the two interfaces).
- *
- * FIXME: At this point we haven't probed the drives so we can't make
- * the appropriate decision. Really we should defer this problem until
- * we tune the drive then try to grab DMA ownership if we want to be
- * the DMA end. This has to become dynamic to handle hot-plug.
- */
- dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
- if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
- printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
- d->name, pci_name(dev));
- return -1;
- }
-out:
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
-
-/*
- * Set up BM-DMA capability (PnP BIOS should have done this)
- */
-int ide_pci_set_master(struct pci_dev *dev, const char *name)
-{
- u16 pcicmd;
-
- pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
-
- if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
- pci_set_master(dev);
-
- if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
- (pcicmd & PCI_COMMAND_MASTER) == 0) {
- printk(KERN_ERR "%s %s: error updating PCICMD\n",
- name, pci_name(dev));
- return -EIO;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_set_master);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
-
-void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
-{
- printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
- d->name, pci_name(dev),
- dev->vendor, dev->device, dev->revision);
-}
-EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
-
-
-/**
- * ide_pci_enable - do PCI enables
- * @dev: PCI device
- * @bars: PCI BARs mask
- * @d: IDE port info
- *
- * Enable the IDE PCI device. We attempt to enable the device in full
- * but if that fails then we only need IO space. The PCI code should
- * have set up the proper resources for us already for controllers in
- * legacy mode.
- *
- * Returns zero on success or an error code
- */
-
-static int ide_pci_enable(struct pci_dev *dev, int bars,
- const struct ide_port_info *d)
-{
- int ret;
-
- if (pci_enable_device(dev)) {
- ret = pci_enable_device_io(dev);
- if (ret < 0) {
- printk(KERN_WARNING "%s %s: couldn't enable device\n",
- d->name, pci_name(dev));
- goto out;
- }
- printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
- d->name, pci_name(dev));
- }
-
- /*
- * assume all devices can do 32-bit DMA for now, we can add
- * a DMA mask field to the struct ide_port_info if we need it
- * (or let lower level driver set the DMA mask)
- */
- ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
- if (ret < 0) {
- printk(KERN_ERR "%s %s: can't set DMA mask\n",
- d->name, pci_name(dev));
- goto out;
- }
-
- ret = pci_request_selected_regions(dev, bars, d->name);
- if (ret < 0)
- printk(KERN_ERR "%s %s: can't reserve resources\n",
- d->name, pci_name(dev));
-out:
- return ret;
-}
-
-/**
- * ide_pci_configure - configure an unconfigured device
- * @dev: PCI device
- * @d: IDE port info
- *
- * Enable and configure the PCI device we have been passed.
- * Returns zero on success or an error code.
- */
-
-static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
-{
- u16 pcicmd = 0;
- /*
- * PnP BIOS was *supposed* to have set up this device, but we
- * can do it ourselves, so long as the BIOS has assigned an IRQ
- * (or possibly the device is using a "legacy header" for IRQs).
- * Maybe the user deliberately *disabled* the device,
- * but we'll eventually ignore it again if no drives respond.
- */
- if (ide_setup_pci_baseregs(dev, d->name) ||
- pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
- printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
- d->name, pci_name(dev));
- return -ENODEV;
- }
- if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
- printk(KERN_ERR "%s %s: error accessing PCI regs\n",
- d->name, pci_name(dev));
- return -EIO;
- }
- if (!(pcicmd & PCI_COMMAND_IO)) {
- printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
- d->name, pci_name(dev));
- return -ENXIO;
- }
- return 0;
-}
-
-/**
- * ide_pci_check_iomem - check a register is I/O
- * @dev: PCI device
- * @d: IDE port info
- * @bar: BAR number
- *
- * Checks if a BAR is configured and points to MMIO space. If so,
- * return an error code. Otherwise return 0
- */
-
-static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
- int bar)
-{
- ulong flags = pci_resource_flags(dev, bar);
-
- /* Unconfigured ? */
- if (!flags || pci_resource_len(dev, bar) == 0)
- return 0;
-
- /* I/O space */
- if (flags & IORESOURCE_IO)
- return 0;
-
- /* Bad */
- return -EINVAL;
-}
-
-/**
- * ide_hw_configure - configure a struct ide_hw instance
- * @dev: PCI device holding interface
- * @d: IDE port info
- * @port: port number
- * @hw: struct ide_hw instance corresponding to this port
- *
- * Perform the initial set up for the hardware interface structure. This
- * is done per interface port rather than per PCI device. There may be
- * more than one port per device.
- *
- * Returns zero on success or an error code.
- */
-
-static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
- unsigned int port, struct ide_hw *hw)
-{
- unsigned long ctl = 0, base = 0;
-
- if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
- if (ide_pci_check_iomem(dev, d, 2 * port) ||
- ide_pci_check_iomem(dev, d, 2 * port + 1)) {
- printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
- "reported as MEM for port %d!\n",
- d->name, pci_name(dev), port);
- return -EINVAL;
- }
-
- ctl = pci_resource_start(dev, 2*port+1);
- base = pci_resource_start(dev, 2*port);
- } else {
- /* Use default values */
- ctl = port ? 0x374 : 0x3f4;
- base = port ? 0x170 : 0x1f0;
- }
-
- if (!base || !ctl) {
- printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
- d->name, pci_name(dev), port);
- return -EINVAL;
- }
-
- memset(hw, 0, sizeof(*hw));
- hw->dev = &dev->dev;
- ide_std_init_ports(hw, base, ctl | 2);
-
- return 0;
-}
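
With the legacy fall-back values above, the `ctl | 2` passed to ide_std_init_ports() yields the classic device-control port addresses: 0x3f4 | 2 == 0x3f6 for the primary channel and 0x374 | 2 == 0x376 for the secondary one.
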
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-/**
- * ide_hwif_setup_dma - configure DMA interface
- * @hwif: IDE interface
- * @d: IDE port info
- *
- * Set up the DMA base for the interface. Enable the master bits as
- * necessary and attempt to bring the device DMA into a ready to use
- * state
- */
-
-int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
- ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
- (dev->class & 0x80))) {
- unsigned long base = ide_pci_dma_base(hwif, d);
-
- if (base == 0)
- return -1;
-
- hwif->dma_base = base;
-
- if (hwif->dma_ops == NULL)
- hwif->dma_ops = &sff_dma_ops;
-
- if (ide_pci_check_simplex(hwif, d) < 0)
- return -1;
-
- if (ide_pci_set_master(dev, d->name) < 0)
- return -1;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
- else
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
-
- hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
- if (ide_allocate_dma_engine(hwif))
- return -1;
- }
-
- return 0;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
-
-/**
- * ide_setup_pci_controller - set up IDE PCI
- * @dev: PCI device
- * @bars: PCI BARs mask
- * @d: IDE port info
- * @noisy: verbose flag
- *
- * Set up the PCI and controller side of the IDE interface. This brings
- * up the PCI side of the device, checks that the device is enabled
- * and enables it if need be
- */
-
-static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
- const struct ide_port_info *d, int noisy)
-{
- int ret;
- u16 pcicmd;
-
- if (noisy)
- ide_setup_pci_noise(dev, d);
-
- ret = ide_pci_enable(dev, bars, d);
- if (ret < 0)
- goto out;
-
- ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
- if (ret < 0) {
- printk(KERN_ERR "%s %s: error accessing PCI regs\n",
- d->name, pci_name(dev));
- goto out_free_bars;
- }
- if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
- ret = ide_pci_configure(dev, d);
- if (ret < 0)
- goto out_free_bars;
- printk(KERN_INFO "%s %s: device enabled (Linux)\n",
- d->name, pci_name(dev));
- }
-
- goto out;
-
-out_free_bars:
- pci_release_selected_regions(dev, bars);
-out:
- return ret;
-}
-
-/**
- * ide_pci_setup_ports - configure ports/devices on PCI IDE
- * @dev: PCI device
- * @d: IDE port info
- * @hw: struct ide_hw instances corresponding to this PCI IDE device
- * @hws: struct ide_hw pointers table to update
- *
- * Scan the interfaces attached to this device and do any
- * necessary per port setup. Attach the devices and ask the
- * generic DMA layer to do its work for us.
- *
- * Normally called automatically from ide_pci_init_two(),
- * but is also used directly as a helper function by some controllers
- * where the chipset setup is not the default PCI IDE one.
- */
-
-void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
- struct ide_hw *hw, struct ide_hw **hws)
-{
- int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
- u8 tmp;
-
- /*
- * Set up the IDE ports
- */
-
- for (port = 0; port < channels; ++port) {
- const struct ide_pci_enablebit *e = &d->enablebits[port];
-
- if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
- (tmp & e->mask) != e->val)) {
- printk(KERN_INFO "%s %s: IDE port disabled\n",
- d->name, pci_name(dev));
- continue; /* port not enabled */
- }
-
- if (ide_hw_configure(dev, d, port, hw + port))
- continue;
-
- *(hws + port) = hw + port;
- }
-}
-EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
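
The enablebits test in the loop above reads a chipset-specific PCI config byte and compares it under a mask; the per-chipset values live in each driver's struct ide_port_info, not in this file. A sketch of what such an entry looks like (register numbers below follow the common PIIX-style layout and are illustrative only):

	/* Illustrative ide_port_info fragment: primary/secondary port-enable
	 * flags assumed to sit in config bytes 0x41/0x43, bit 7. */
	static const struct ide_port_info example_chipset = {
		.name		= "example",
		.enablebits	= { { 0x41, 0x80, 0x80 }, { 0x43, 0x80, 0x80 } },
		.host_flags	= IDE_HFLAG_NO_DMA,
	};
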
-
-/*
- * ide_setup_pci_device() looks at the primary/secondary interfaces
- * on a PCI IDE device and, if they are enabled, prepares the IDE driver
- * for use with them. This generic code works for most PCI chipsets.
- *
- * One thing that is not standardized is the location of the
- * primary/secondary interface "enable/disable" bits. For chipsets that
- * we "know" about, this information is in the struct ide_port_info;
- * for all other chipsets, we just assume both interfaces are enabled.
- */
-static int do_ide_setup_pci_device(struct pci_dev *dev,
- const struct ide_port_info *d,
- u8 noisy)
-{
- int pciirq, ret;
-
- /*
- * Can we trust the reported IRQ?
- */
- pciirq = dev->irq;
-
- /*
- * This allows offboard ide-pci cards to enable a BIOS,
- * verify interrupt settings of split-mirror pci-config
- * space, place the chipset into init-mode, and/or preserve
- * an interrupt if the card does not support native IDE.
- */
- ret = d->init_chipset ? d->init_chipset(dev) : 0;
- if (ret < 0)
- goto out;
-
- if (ide_pci_is_in_compatibility_mode(dev)) {
- if (noisy)
- printk(KERN_INFO "%s %s: not 100%% native mode: will "
- "probe irqs later\n", d->name, pci_name(dev));
- pciirq = 0;
- } else if (!pciirq && noisy) {
- printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
- d->name, pci_name(dev), pciirq);
- } else if (noisy) {
- printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
- d->name, pci_name(dev), pciirq);
- }
-
- ret = pciirq;
-out:
- return ret;
-}
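
ide_pci_is_in_compatibility_mode(), used twice above, is defined in the ide header rather than in this file. A sketch of the check, consistent with the progif handling in ide_setup_pci_baseregs() (treat the controller as compatibility-mode whenever the native-mode bits for the two channels are not both set):

	/* Sketch only -- not this hunk's definition. */
	static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
	{
		if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
		    (dev->class & 5) != 5)
			return 1;
		return 0;
	}
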
-
-int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
- const struct ide_port_info *d, void *priv)
-{
- struct pci_dev *pdev[] = { dev1, dev2 };
- struct ide_host *host;
- int ret, i, n_ports = dev2 ? 4 : 2, bars;
- struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
-
- if (d->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (d->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
- for (i = 0; i < n_ports / 2; i++) {
- ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
- if (ret < 0) {
- if (i == 1)
- pci_release_selected_regions(pdev[0], bars);
- goto out;
- }
-
- ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
- }
-
- host = ide_host_alloc(d, hws, n_ports);
- if (host == NULL) {
- ret = -ENOMEM;
- goto out_free_bars;
- }
-
- host->dev[0] = &dev1->dev;
- if (dev2)
- host->dev[1] = &dev2->dev;
-
- host->host_priv = priv;
- host->irq_flags = IRQF_SHARED;
-
- pci_set_drvdata(pdev[0], host);
- if (dev2)
- pci_set_drvdata(pdev[1], host);
-
- for (i = 0; i < n_ports / 2; i++) {
- ret = do_ide_setup_pci_device(pdev[i], d, !i);
-
- /*
- * FIXME: Mom, mom, they stole the helper function I need to undo
- * do_ide_setup_pci_device() on the first device!
- */
- if (ret < 0)
- goto out_free_bars;
-
- /* fixup IRQ */
- if (ide_pci_is_in_compatibility_mode(pdev[i])) {
- hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
- hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
- } else
- hw[i*2 + 1].irq = hw[i*2].irq = ret;
- }
-
- ret = ide_host_register(host, d, hws);
- if (ret)
- ide_host_free(host);
- else
- goto out;
-
-out_free_bars:
- i = n_ports / 2;
- while (i--)
- pci_release_selected_regions(pdev[i], bars);
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ide_pci_init_two);
-
-int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
- void *priv)
-{
- return ide_pci_init_two(dev, NULL, d, priv);
-}
-EXPORT_SYMBOL_GPL(ide_pci_init_one);
-
-void ide_pci_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
- int bars;
-
- if (host->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (host->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
- ide_host_remove(host);
-
- if (dev2)
- pci_release_selected_regions(dev2, bars);
- pci_release_selected_regions(dev, bars);
-
- if (dev2)
- pci_disable_device(dev2);
- pci_disable_device(dev);
-}
-EXPORT_SYMBOL_GPL(ide_pci_remove);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_suspend);
-
-int ide_pci_resume(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- int rc;
-
- pci_set_power_state(dev, PCI_D0);
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- pci_restore_state(dev);
- pci_set_master(dev);
-
- if (host->init_chipset)
- host->init_chipset(dev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_resume);
-#endif
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
deleted file mode 100644
index c4b20f350b84..000000000000
--- a/drivers/ide/siimage.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003 Red Hat
- * Copyright (C) 2007-2008 MontaVista Software, Inc.
- * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * Documentation for CMD680:
- * http://gkernel.sourceforge.net/specs/sii/sii-0680a-v1.31.pdf.bz2
- *
- * Documentation for SiI 3112:
- * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
- *
- * Errata and other documentation only available under NDA.
- *
- *
- * FAQ Items:
- * If you are using Marvell SATA-IDE adapters with Maxtor drives
- * ensure the system is set up for ATA100/UDMA5, not UDMA6.
- *
- * If you are using WD drives with SATA bridges you must set the
- * drive to "Single". "Master" will hang.
- *
- * If you have strange problems with nVidia chipset systems please
- * see the SI support documentation and update your system BIOS
- * if necessary
- *
- * The Dell DRAC4 has some interesting features including effectively hot
- * unplugging/replugging the virtual CD interface when the DRAC is reset.
- * This often causes drivers/ide/siimage to panic but is ok with the rather
- * smarter code in libata.
- *
- * TODO:
- * - VDMA support
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#define DRV_NAME "siimage"
-
-/**
- * pdev_is_sata - check if device is SATA
- * @pdev: PCI device to check
- *
- * Returns true if this is a SATA controller
- */
-
-static int pdev_is_sata(struct pci_dev *pdev)
-{
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- switch (pdev->device) {
- case PCI_DEVICE_ID_SII_3112:
- case PCI_DEVICE_ID_SII_1210SA:
- return 1;
- case PCI_DEVICE_ID_SII_680:
- return 0;
- }
- BUG();
-#endif
- return 0;
-}
-
-/**
- * is_sata - check if hwif is SATA
- * @hwif: interface to check
- *
- * Returns true if this is a SATA controller
- */
-
-static inline int is_sata(ide_hwif_t *hwif)
-{
- return pdev_is_sata(to_pci_dev(hwif->dev));
-}
-
-/**
- * siimage_selreg - return register base
- * @hwif: interface
- * @r: config offset
- *
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
- * Thankfully this is a configuration operation, so isn't performance
- * critical.
- */
-
-static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
-{
- unsigned long base = (unsigned long)hwif->hwif_data;
-
- base += 0xA0 + r;
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- base += hwif->channel << 6;
- else
- base += hwif->channel << 4;
- return base;
-}
-
-/**
- * siimage_seldev - return register base
- * @drive: IDE device
- * @r: config offset
- *
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question,
- * including accounting for the unit shift.
- */
-
-static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long base = (unsigned long)hwif->hwif_data;
- u8 unit = drive->dn & 1;
-
- base += 0xA0 + r;
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- base += hwif->channel << 6;
- else
- base += hwif->channel << 4;
- base |= unit << unit;
- return base;
-}
-
-static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- u8 tmp = 0;
-
- if (host->host_priv)
- tmp = readb((void __iomem *)addr);
- else
- pci_read_config_byte(dev, addr, &tmp);
-
- return tmp;
-}
-
-static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- u16 tmp = 0;
-
- if (host->host_priv)
- tmp = readw((void __iomem *)addr);
- else
- pci_read_config_word(dev, addr, &tmp);
-
- return tmp;
-}
-
-static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- if (host->host_priv)
- writeb(val, (void __iomem *)addr);
- else
- pci_write_config_byte(dev, addr, val);
-}
-
-static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- if (host->host_priv)
- writew(val, (void __iomem *)addr);
- else
- pci_write_config_word(dev, addr, val);
-}
-
-static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
-{
- struct ide_host *host = pci_get_drvdata(dev);
-
- if (host->host_priv)
- writel(val, (void __iomem *)addr);
- else
- pci_write_config_dword(dev, addr, val);
-}
-
-/**
- * sil_udma_filter - compute UDMA mask
- * @drive: IDE device
- *
- * Compute the available UDMA speeds for the device on the interface.
- *
- * For the CMD680 this depends on the clocking mode (scsc), for the
- * SI3112 SATA controller life is a bit simpler.
- */
-
-static u8 sil_pata_udma_filter(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = (unsigned long)hwif->hwif_data;
- u8 scsc, mask = 0;
-
- base += (hwif->host_flags & IDE_HFLAG_MMIO) ? 0x4A : 0x8A;
-
- scsc = sil_ioread8(dev, base);
-
- switch (scsc & 0x30) {
- case 0x10: /* 133 */
- mask = ATA_UDMA6;
- break;
- case 0x20: /* 2xPCI */
- mask = ATA_UDMA6;
- break;
- case 0x00: /* 100 */
- mask = ATA_UDMA5;
- break;
- default: /* Disabled ? */
- BUG();
- }
-
- return mask;
-}
-
-static u8 sil_sata_udma_filter(ide_drive_t *drive)
-{
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- return strstr(m, "Maxtor") ? ATA_UDMA5 : ATA_UDMA6;
-}
-
-/**
- * sil_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * Load the timing settings for this device mode into the
- * controller.
- */
-
-static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
- static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- ide_drive_t *pair = ide_get_pair_dev(drive);
- u32 speedt = 0;
- u16 speedp = 0;
- unsigned long addr = siimage_seldev(drive, 0x04);
- unsigned long tfaddr = siimage_selreg(hwif, 0x02);
- unsigned long base = (unsigned long)hwif->hwif_data;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 tf_pio = pio;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
- : (mmio ? 0xB4 : 0x80);
- u8 mode = 0;
- u8 unit = drive->dn & 1;
-
- /* trim *taskfile* PIO to the slowest of the master/slave */
- if (pair) {
- u8 pair_pio = pair->pio_mode - XFER_PIO_0;
-
- if (pair_pio < tf_pio)
- tf_pio = pair_pio;
- }
-
- /* cheat for now and use the docs */
- speedp = data_speed[pio];
- speedt = tf_speed[tf_pio];
-
- sil_iowrite16(dev, speedp, addr);
- sil_iowrite16(dev, speedt, tfaddr);
-
- /* now set up IORDY */
- speedp = sil_ioread16(dev, tfaddr - 2);
- speedp &= ~0x200;
-
- mode = sil_ioread8(dev, base + addr_mask);
- mode &= ~(unit ? 0x30 : 0x03);
-
- if (ide_pio_need_iordy(drive, pio)) {
- speedp |= 0x200;
- mode |= unit ? 0x10 : 0x01;
- }
-
- sil_iowrite16(dev, speedp, tfaddr - 2);
- sil_iowrite8(dev, mode, base + addr_mask);
-}
-
-/**
- * sil_set_dma_mode - set host controller for DMA mode
- * @hwif: port
- * @drive: drive
- *
- * Tune the SiI chipset for the desired DMA mode.
- */
-
-static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
- static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
- static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
-
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long base = (unsigned long)hwif->hwif_data;
- u16 ultra = 0, multi = 0;
- u8 mode = 0, unit = drive->dn & 1;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 scsc = 0, addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
- : (mmio ? 0xB4 : 0x80);
- unsigned long ma = siimage_seldev(drive, 0x08);
- unsigned long ua = siimage_seldev(drive, 0x0C);
- const u8 speed = drive->dma_mode;
-
- scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A));
- mode = sil_ioread8 (dev, base + addr_mask);
- multi = sil_ioread16(dev, ma);
- ultra = sil_ioread16(dev, ua);
-
- mode &= ~(unit ? 0x30 : 0x03);
- ultra &= ~0x3F;
- scsc = ((scsc & 0x30) == 0x00) ? 0 : 1;
-
- scsc = is_sata(hwif) ? 1 : scsc;
-
- if (speed >= XFER_UDMA_0) {
- multi = dma[2];
- ultra |= scsc ? ultra6[speed - XFER_UDMA_0] :
- ultra5[speed - XFER_UDMA_0];
- mode |= unit ? 0x30 : 0x03;
- } else {
- multi = dma[speed - XFER_MW_DMA_0];
- mode |= unit ? 0x20 : 0x02;
- }
-
- sil_iowrite8 (dev, mode, base + addr_mask);
- sil_iowrite16(dev, multi, ma);
- sil_iowrite16(dev, ultra, ua);
-}
-
-static int sil_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long addr = siimage_selreg(hwif, 1);
- u8 val = sil_ioread8(dev, addr);
-
- /* Return 1 if INTRQ asserted */
- return (val & 8) ? 1 : 0;
-}
-
-/**
- * siimage_mmio_dma_test_irq - check we caused an IRQ
- * @drive: drive we are testing
- *
- * Check if we caused an IDE DMA interrupt. We may also have caused
- * SATA status interrupts, if so we clean them up and continue.
- */
-
-static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *sata_error_addr
- = (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET];
-
- if (sata_error_addr) {
- unsigned long base = (unsigned long)hwif->hwif_data;
- u32 ext_stat = readl((void __iomem *)(base + 0x10));
- u8 watchdog = 0;
-
- if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) {
- u32 sata_error = readl(sata_error_addr);
-
- writel(sata_error, sata_error_addr);
- watchdog = (sata_error & 0x00680000) ? 1 : 0;
- printk(KERN_WARNING "%s: sata_error = 0x%08x, "
- "watchdog = %d, %s\n",
- drive->name, sata_error, watchdog, __func__);
- } else
- watchdog = (ext_stat & 0x8000) ? 1 : 0;
-
- ext_stat >>= 16;
- if (!(ext_stat & 0x0404) && !watchdog)
- return 0;
- }
-
- /* return 1 if INTR asserted */
- if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
- return 1;
-
- return 0;
-}
-
-static int siimage_dma_test_irq(ide_drive_t *drive)
-{
- if (drive->hwif->host_flags & IDE_HFLAG_MMIO)
- return siimage_mmio_dma_test_irq(drive);
- else
- return ide_dma_test_irq(drive);
-}
-
-/**
- * sil_sata_reset_poll - wait for SATA reset
- * @drive: drive we are resetting
- *
- * Poll the SATA phy and see whether it has come back from the dead
- * yet.
- */
-
-static blk_status_t sil_sata_reset_poll(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *sata_status_addr
- = (void __iomem *)hwif->sata_scr[SATA_STATUS_OFFSET];
-
- if (sata_status_addr) {
- /* SATA Status is available only when in MMIO mode */
- u32 sata_stat = readl(sata_status_addr);
-
- if ((sata_stat & 0x03) != 0x03) {
- printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
- hwif->name, sata_stat);
- return BLK_STS_IOERR;
- }
- }
-
- return BLK_STS_OK;
-}
-
-/**
- * sil_sata_pre_reset - reset hook
- * @drive: IDE device being reset
- *
- * For the SATA devices we need to handle recalibration/geometry
- * differently
- */
-
-static void sil_sata_pre_reset(ide_drive_t *drive)
-{
- if (drive->media == ide_disk) {
- drive->special_flags &=
- ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE);
- }
-}
-
-/**
- * init_chipset_siimage - set up an SI device
- * @dev: PCI device
- *
- * Perform the initial PCI set up for this device. Attempt to switch
- * to 133 MHz clocking if the system isn't already set up to do it.
- */
-
-static int init_chipset_siimage(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- void __iomem *ioaddr = host->host_priv;
- unsigned long base, scsc_addr;
- u8 rev = dev->revision, tmp;
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
-
- if (ioaddr)
- pci_set_master(dev);
-
- base = (unsigned long)ioaddr;
-
- if (ioaddr && pdev_is_sata(dev)) {
- u32 tmp32, irq_mask;
-
- /* make sure IDE0/1 interrupts are not masked */
- irq_mask = (1 << 22) | (1 << 23);
- tmp32 = readl(ioaddr + 0x48);
- if (tmp32 & irq_mask) {
- tmp32 &= ~irq_mask;
- writel(tmp32, ioaddr + 0x48);
- readl(ioaddr + 0x48); /* flush */
- }
- writel(0, ioaddr + 0x148);
- writel(0, ioaddr + 0x1C8);
- }
-
- sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
- sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);
-
- scsc_addr = base ? (base + 0x4A) : 0x8A;
- tmp = sil_ioread8(dev, scsc_addr);
-
- switch (tmp & 0x30) {
- case 0x00:
-		/* On 100 MHz clocking, try to switch to 133 MHz */
-		sil_iowrite8(dev, tmp | 0x10, scsc_addr);
-		break;
-	case 0x30:
-		/* Clocking is disabled, attempt to force 133 MHz clocking. */
-		sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
-		fallthrough;
-	case 0x10:
-		/* On 133 MHz clocking. */
-		break;
- case 0x20:
- /* On PCIx2 clocking. */
- break;
- }
-
- tmp = sil_ioread8(dev, scsc_addr);
-
- sil_iowrite8 (dev, 0x72, base + 0xA1);
- sil_iowrite16(dev, 0x328A, base + 0xA2);
- sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
- sil_iowrite32(dev, 0x43924392, base + 0xA8);
- sil_iowrite32(dev, 0x40094009, base + 0xAC);
- sil_iowrite8 (dev, 0x72, base ? (base + 0xE1) : 0xB1);
- sil_iowrite16(dev, 0x328A, base ? (base + 0xE2) : 0xB2);
- sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
- sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
- sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);
-
- if (base && pdev_is_sata(dev)) {
- writel(0xFFFF0000, ioaddr + 0x108);
- writel(0xFFFF0000, ioaddr + 0x188);
- writel(0x00680000, ioaddr + 0x148);
- writel(0x00680000, ioaddr + 0x1C8);
- }
-
- /* report the clocking mode of the controller */
- if (!pdev_is_sata(dev)) {
- static const char *clk_str[] =
- { "== 100", "== 133", "== 2X PCI", "DISABLED!" };
-
- tmp >>= 4;
- printk(KERN_INFO DRV_NAME " %s: BASE CLOCK %s\n",
- pci_name(dev), clk_str[tmp & 3]);
- }
-
- return 0;
-}
-
-/**
- * init_mmio_iops_siimage - set up the iops for MMIO
- * @hwif: interface to set up
- *
- * The basic setup here is fairly simple, we can use standard MMIO
- * operations. However, we do have to set the taskfile register offsets
- * by hand as there isn't a standard defined layout for them this time.
- *
- * The hardware supports buffered taskfiles and also some rather nice
- * extended PRD tables. For better SI3112 support use the libata driver
- */
-
-static void init_mmio_iops_siimage(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- void *addr = host->host_priv;
- u8 ch = hwif->channel;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long base;
-
- /*
- * Fill in the basic hwif bits
- */
- hwif->host_flags |= IDE_HFLAG_MMIO;
-
- hwif->hwif_data = addr;
-
- /*
- * Now set up the hw. We have to do this ourselves as the
- * MMIO layout isn't the same as the standard port based I/O.
- */
- memset(io_ports, 0, sizeof(*io_ports));
-
- base = (unsigned long)addr;
- if (ch)
- base += 0xC0;
- else
- base += 0x80;
-
- /*
- * The buffered task file doesn't have status/control, so we
- * can't currently use it sanely since we want to use LBA48 mode.
- */
- io_ports->data_addr = base;
- io_ports->error_addr = base + 1;
- io_ports->nsect_addr = base + 2;
- io_ports->lbal_addr = base + 3;
- io_ports->lbam_addr = base + 4;
- io_ports->lbah_addr = base + 5;
- io_ports->device_addr = base + 6;
- io_ports->status_addr = base + 7;
- io_ports->ctl_addr = base + 10;
-
- if (pdev_is_sata(dev)) {
- base = (unsigned long)addr;
- if (ch)
- base += 0x80;
- hwif->sata_scr[SATA_STATUS_OFFSET] = base + 0x104;
- hwif->sata_scr[SATA_ERROR_OFFSET] = base + 0x108;
- hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
- }
-
- hwif->irq = dev->irq;
-
- hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
-}
-
-static int is_dev_seagate_sata(ide_drive_t *drive)
-{
- const char *s = (const char *)&drive->id[ATA_ID_PROD];
- unsigned len = strnlen(s, ATA_ID_PROD_LEN);
-
- if ((len > 4) && (!memcmp(s, "ST", 2)))
- if ((!memcmp(s + len - 2, "AS", 2)) ||
- (!memcmp(s + len - 3, "ASL", 3))) {
- printk(KERN_INFO "%s: applying pessimistic Seagate "
- "errata fix\n", drive->name);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * sil_quirkproc - post probe fixups
- * @drive: drive
- *
- * Called after drive probe we use this to decide whether the
- * Seagate fixup must be applied. This used to be in init_iops but
- * that can occur before we know what drives are present.
- */
-
-static void sil_quirkproc(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
-
-	/* Try to raise the rqsize */
- if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
- hwif->rqsize = 128;
-}
-
-/**
- * init_iops_siimage - set up iops
- * @hwif: interface to set up
- *
- * Do the basic setup for the SIIMAGE hardware interface
- * and then do the MMIO setup if we can. This is the first
- * chance we get to set up the hwif, so we get the iops
- * right before they are used.
- */
-
-static void init_iops_siimage(ide_hwif_t *hwif)
-{
- struct ide_host *host = dev_get_drvdata(hwif->dev);
-
- hwif->hwif_data = NULL;
-
- /* Pessimal until we finish probing */
- hwif->rqsize = 15;
-
- if (host->host_priv)
- init_mmio_iops_siimage(hwif);
-}
-
-/**
- * sil_cable_detect - cable detection
- * @hwif: interface to check
- *
- * Check for the presence of an ATA66 capable cable on the interface.
- */
-
-static u8 sil_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long addr = siimage_selreg(hwif, 0);
- u8 ata66 = sil_ioread8(dev, addr);
-
- return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops sil_pata_port_ops = {
- .set_pio_mode = sil_set_pio_mode,
- .set_dma_mode = sil_set_dma_mode,
- .quirkproc = sil_quirkproc,
- .test_irq = sil_test_irq,
- .udma_filter = sil_pata_udma_filter,
- .cable_detect = sil_cable_detect,
-};
-
-static const struct ide_port_ops sil_sata_port_ops = {
- .set_pio_mode = sil_set_pio_mode,
- .set_dma_mode = sil_set_dma_mode,
- .reset_poll = sil_sata_reset_poll,
- .pre_reset = sil_sata_pre_reset,
- .quirkproc = sil_quirkproc,
- .test_irq = sil_test_irq,
- .udma_filter = sil_sata_udma_filter,
- .cable_detect = sil_cable_detect,
-};
-
-static const struct ide_dma_ops sil_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = siimage_dma_test_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-#define DECLARE_SII_DEV(p_ops) \
- { \
- .name = DRV_NAME, \
- .init_chipset = init_chipset_siimage, \
- .init_iops = init_iops_siimage, \
- .port_ops = p_ops, \
- .dma_ops = &sil_dma_ops, \
- .pio_mask = ATA_PIO4, \
- .mwdma_mask = ATA_MWDMA2, \
- .udma_mask = ATA_UDMA6, \
- }
-
-static const struct ide_port_info siimage_chipsets[] = {
- /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
- /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
-};
-
-/**
- * siimage_init_one - PCI layer discovery entry
- * @dev: PCI device
- * @id: ident table entry
- *
- * Called by the PCI code when it finds an SiI680 or SiI3112 controller.
- * We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- void __iomem *ioaddr = NULL;
- resource_size_t bar5 = pci_resource_start(dev, 5);
- unsigned long barsize = pci_resource_len(dev, 5);
- int rc;
- struct ide_port_info d;
- u8 idx = id->driver_data;
- u8 BA5_EN;
-
- d = siimage_chipsets[idx];
-
- if (idx) {
- static int first = 1;
-
- if (first) {
- printk(KERN_INFO DRV_NAME ": For full SATA support you "
- "should use the libata sata_sil module.\n");
- first = 0;
- }
-
- d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
- }
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- pci_read_config_byte(dev, 0x8A, &BA5_EN);
- if ((BA5_EN & 0x01) || bar5) {
- /*
- * Drop back to PIO if we can't map the MMIO. Some systems
- * seem to get terminally confused in the PCI spaces.
- */
- if (!request_mem_region(bar5, barsize, d.name)) {
- printk(KERN_WARNING DRV_NAME " %s: MMIO ports not "
- "available\n", pci_name(dev));
- } else {
- ioaddr = pci_ioremap_bar(dev, 5);
- if (ioaddr == NULL)
- release_mem_region(bar5, barsize);
- }
- }
-
- rc = ide_pci_init_one(dev, &d, ioaddr);
- if (rc) {
- if (ioaddr) {
- iounmap(ioaddr);
- release_mem_region(bar5, barsize);
- }
- pci_disable_device(dev);
- }
-
- return rc;
-}
-
-static void siimage_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- void __iomem *ioaddr = host->host_priv;
-
- ide_pci_remove(dev);
-
- if (ioaddr) {
- resource_size_t bar5 = pci_resource_start(dev, 5);
- unsigned long barsize = pci_resource_len(dev, 5);
-
- iounmap(ioaddr);
- release_mem_region(bar5, barsize);
- }
-
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id siimage_pci_tbl[] = {
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), 0 },
-#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_3112), 1 },
- { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_1210SA), 1 },
-#endif
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, siimage_pci_tbl);
-
-static struct pci_driver siimage_pci_driver = {
- .name = "SiI_IDE",
- .id_table = siimage_pci_tbl,
- .probe = siimage_init_one,
- .remove = siimage_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init siimage_ide_init(void)
-{
- return ide_pci_register_driver(&siimage_pci_driver);
-}
-
-static void __exit siimage_ide_exit(void)
-{
- pci_unregister_driver(&siimage_pci_driver);
-}
-
-module_init(siimage_ide_init);
-module_exit(siimage_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick, Alan Cox");
-MODULE_DESCRIPTION("PCI driver module for SiI IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
deleted file mode 100644
index 1a700bef6c56..000000000000
--- a/drivers/ide/sis5513.c
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
- * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- *
- * Thanks :
- *
- * SiS Taiwan : for direct support and hardware.
- * Daniela Engert : for initial ATA100 advices and numerous others.
- * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt :
- * for checking code correctness, providing patches.
- *
- *
- * Original tests and design on the SiS620 chipset.
- * ATA100 tests and design on the SiS735 chipset.
- * ATA16/33 support from specs
- * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
- * ATA133 961/962/963 fixes by Vojtech Pavlik <vojtech@suse.cz>
- *
- * Documentation:
- * SiS chipset documentation available under NDA to companies only
- * (not to individuals).
- */
-
-/*
- * The original SiS5513 comes from a SiS5511/5512/5513 chipset. The original
- * SiS5513 was also used in the SiS5596/5513 chipset. Thus if we see a SiS5511
- * or SiS5596, we can assume we see the first MWDMA-16 capable SiS5513 chip.
- *
- * Later SiS chipsets integrated the 5513 functionality into the NorthBridge,
- * starting with SiS5571 and up to SiS745. The PCI ID didn't change, though. We
- * can figure out that we have a more modern and more capable 5513 by looking
- * for the respective NorthBridge IDs.
- *
- * Even later (96x family) SiS chipsets use the MuTIOL link and place the 5513
- * into the SouthBridge. Here we cannot rely on looking up the NorthBridge PCI
- * ID, while the now ATA-133 capable 5513 still has the same PCI ID.
- * Fortunately the 5513 can be 'unmasked' by fiddling with some config space
- * bits, changing its device id to the true one - 5517 for 961 and 5518 for
- * 962/963.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "sis5513"
-
-/* registers layout and init values are chipset family dependent */
-#undef ATA_16
-#define ATA_16 0x01
-#define ATA_33 0x02
-#define ATA_66 0x03
-#define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */
-#define ATA_100 0x05
-#define ATA_133a 0x06 /* SiS961b with 133 support */
-#define ATA_133 0x07 /* SiS962/963 */
-
-static u8 chipset_family;
-
-/*
- * Devices supported
- */
-static const struct {
- const char *name;
- u16 host_id;
- u8 chipset_family;
- u8 flags;
-} SiSHostChipInfo[] = {
- { "SiS968", PCI_DEVICE_ID_SI_968, ATA_133 },
- { "SiS966", PCI_DEVICE_ID_SI_966, ATA_133 },
- { "SiS965", PCI_DEVICE_ID_SI_965, ATA_133 },
- { "SiS745", PCI_DEVICE_ID_SI_745, ATA_100 },
- { "SiS735", PCI_DEVICE_ID_SI_735, ATA_100 },
- { "SiS733", PCI_DEVICE_ID_SI_733, ATA_100 },
- { "SiS635", PCI_DEVICE_ID_SI_635, ATA_100 },
- { "SiS633", PCI_DEVICE_ID_SI_633, ATA_100 },
-
- { "SiS730", PCI_DEVICE_ID_SI_730, ATA_100a },
- { "SiS550", PCI_DEVICE_ID_SI_550, ATA_100a },
-
- { "SiS640", PCI_DEVICE_ID_SI_640, ATA_66 },
- { "SiS630", PCI_DEVICE_ID_SI_630, ATA_66 },
- { "SiS620", PCI_DEVICE_ID_SI_620, ATA_66 },
- { "SiS540", PCI_DEVICE_ID_SI_540, ATA_66 },
- { "SiS530", PCI_DEVICE_ID_SI_530, ATA_66 },
-
- { "SiS5600", PCI_DEVICE_ID_SI_5600, ATA_33 },
- { "SiS5598", PCI_DEVICE_ID_SI_5598, ATA_33 },
- { "SiS5597", PCI_DEVICE_ID_SI_5597, ATA_33 },
- { "SiS5591/2", PCI_DEVICE_ID_SI_5591, ATA_33 },
- { "SiS5582", PCI_DEVICE_ID_SI_5582, ATA_33 },
- { "SiS5581", PCI_DEVICE_ID_SI_5581, ATA_33 },
-
- { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 },
- { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 },
- { "SiS5517", PCI_DEVICE_ID_SI_5517, ATA_16 },
- { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 },
-};
-
-/* Cycle time bits and values vary across chip DMA capabilities.
- These three arrays hold the register layout and the values to set.
- Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */
-
-/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */
-static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 };
-static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 };
-static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */
- { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */
- { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific),
- different cycle_time range and offset */
- { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */
-	{ 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 961 southbridges) */
- { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */
-};
-/* CRC Valid Setup Time varies across the IDE clock settings 33/66/100/133;
-   see the SiS962 data sheet for more detail */
-static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
- { 2, 1, 1, 0, 0, 0, 0 },
- { 4, 3, 2, 1, 0, 0, 0 },
- { 4, 3, 2, 1, 0, 0, 0 },
- { 6, 4, 3, 1, 1, 1, 0 },
- { 9, 6, 4, 2, 2, 2, 2 },
- { 9, 6, 4, 2, 2, 2, 2 },
-};
-/* Initialize time, Active time, Recovery time vary across
- IDE clock settings. These 3 arrays hold the register value
- for PIO0/1/2/3/4 and DMA0/1/2 mode in order */
-static u8 ini_time_value[][8] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 2, 1, 0, 0, 0, 1, 0, 0 },
- { 4, 3, 1, 1, 1, 3, 1, 1 },
- { 4, 3, 1, 1, 1, 3, 1, 1 },
- { 6, 4, 2, 2, 2, 4, 2, 2 },
- { 9, 6, 3, 3, 3, 6, 3, 3 },
- { 9, 6, 3, 3, 3, 6, 3, 3 },
-};
-static u8 act_time_value[][8] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 9, 9, 9, 2, 2, 7, 2, 2 },
- { 19, 19, 19, 5, 4, 14, 5, 4 },
- { 19, 19, 19, 5, 4, 14, 5, 4 },
- { 28, 28, 28, 7, 6, 21, 7, 6 },
- { 38, 38, 38, 10, 9, 28, 10, 9 },
- { 38, 38, 38, 10, 9, 28, 10, 9 },
-};
-static u8 rco_time_value[][8] = {
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0, 0 },
- { 9, 2, 0, 2, 0, 7, 1, 1 },
- { 19, 5, 1, 5, 2, 16, 3, 2 },
- { 19, 5, 1, 5, 2, 16, 3, 2 },
- { 30, 9, 3, 9, 4, 25, 6, 4 },
- { 40, 12, 4, 12, 5, 34, 12, 5 },
- { 40, 12, 4, 12, 5, 34, 12, 5 },
-};
-
-/*
- * Printing configuration
- */
-/* Used for chipset type printing at boot time */
-static char *chipset_capability[] = {
- "ATA", "ATA 16",
- "ATA 33", "ATA 66",
- "ATA 100 (1st gen)", "ATA 100 (2nd gen)",
- "ATA 133 (1st gen)", "ATA 133 (2nd gen)"
-};
-
-/*
- * Configuration functions
- */
-
-static u8 sis_ata133_get_base(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 reg54 = 0;
-
- pci_read_config_dword(dev, 0x54, &reg54);
-
- return ((reg54 & 0x40000000) ? 0x70 : 0x40) + drive->dn * 4;
-}
-
-static void sis_ata16_program_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u16 t1 = 0;
- u8 drive_pci = 0x40 + drive->dn * 2;
-
- const u16 pio_timings[] = { 0x000, 0x607, 0x404, 0x303, 0x301 };
- const u16 mwdma_timings[] = { 0x008, 0x302, 0x301 };
-
- pci_read_config_word(dev, drive_pci, &t1);
-
- /* clear active/recovery timings */
- t1 &= ~0x070f;
- if (mode >= XFER_MW_DMA_0) {
- if (chipset_family > ATA_16)
- t1 &= ~0x8000; /* disable UDMA */
- t1 |= mwdma_timings[mode - XFER_MW_DMA_0];
- } else
- t1 |= pio_timings[mode - XFER_PIO_0];
-
- pci_write_config_word(dev, drive_pci, t1);
-}
-
-static void sis_ata100_program_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 t1, drive_pci = 0x40 + drive->dn * 2;
-
- /* timing bits: 7:4 active 3:0 recovery */
- const u8 pio_timings[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
- const u8 mwdma_timings[] = { 0x08, 0x32, 0x31 };
-
- if (mode >= XFER_MW_DMA_0) {
- u8 t2 = 0;
-
- pci_read_config_byte(dev, drive_pci, &t2);
- t2 &= ~0x80; /* disable UDMA */
- pci_write_config_byte(dev, drive_pci, t2);
-
- t1 = mwdma_timings[mode - XFER_MW_DMA_0];
- } else
- t1 = pio_timings[mode - XFER_PIO_0];
-
- pci_write_config_byte(dev, drive_pci + 1, t1);
-}
-
-static void sis_ata133_program_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 t1 = 0;
- u8 drive_pci = sis_ata133_get_base(drive), clk, idx;
-
- pci_read_config_dword(dev, drive_pci, &t1);
-
- t1 &= 0xc0c00fff;
- clk = (t1 & 0x08) ? ATA_133 : ATA_100;
- if (mode >= XFER_MW_DMA_0) {
- t1 &= ~0x04; /* disable UDMA */
- idx = mode - XFER_MW_DMA_0 + 5;
- } else
- idx = mode - XFER_PIO_0;
- t1 |= ini_time_value[clk][idx] << 12;
- t1 |= act_time_value[clk][idx] << 16;
- t1 |= rco_time_value[clk][idx] << 24;
-
- pci_write_config_dword(dev, drive_pci, t1);
-}
-
-static void sis_program_timings(ide_drive_t *drive, const u8 mode)
-{
- if (chipset_family < ATA_100) /* ATA_16/33/66/100a */
- sis_ata16_program_timings(drive, mode);
- else if (chipset_family < ATA_133) /* ATA_100/133a */
- sis_ata100_program_timings(drive, mode);
- else /* ATA_133 */
- sis_ata133_program_timings(drive, mode);
-}
-
-static void config_drive_art_rwp(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 reg4bh = 0;
- u8 rw_prefetch = 0;
-
- pci_read_config_byte(dev, 0x4b, &reg4bh);
-
- rw_prefetch = reg4bh & ~(0x11 << drive->dn);
-
- if (drive->media == ide_disk)
- rw_prefetch |= 0x11 << drive->dn;
-
- if (reg4bh != rw_prefetch)
- pci_write_config_byte(dev, 0x4b, rw_prefetch);
-}
-
-static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- config_drive_art_rwp(drive);
- sis_program_timings(drive, drive->pio_mode);
-}
-
-static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 regdw = 0;
- u8 drive_pci = sis_ata133_get_base(drive), clk, idx;
-
- pci_read_config_dword(dev, drive_pci, &regdw);
-
- regdw |= 0x04;
- regdw &= 0xfffff00f;
-	/* check if ATA133 is enabled */
- clk = (regdw & 0x08) ? ATA_133 : ATA_100;
- idx = mode - XFER_UDMA_0;
- regdw |= cycle_time_value[clk][idx] << 4;
- regdw |= cvs_time_value[clk][idx] << 8;
-
- pci_write_config_dword(dev, drive_pci, regdw);
-}
-
-static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family;
-
- pci_read_config_byte(dev, drive_pci + 1, &reg);
-
- /* force the UDMA bit on if we want to use UDMA */
- reg |= 0x80;
- /* clean reg cycle time bits */
- reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]);
- /* set reg cycle time bits */
- reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i];
-
- pci_write_config_byte(dev, drive_pci + 1, reg);
-}
-
-static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
-{
- if (chipset_family >= ATA_133) /* ATA_133 */
- sis_ata133_program_udma_timings(drive, mode);
- else /* ATA_33/66/100a/100/133a */
- sis_ata33_program_udma_timings(drive, mode);
-}
-
-static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- const u8 speed = drive->dma_mode;
-
- if (speed >= XFER_UDMA_0)
- sis_program_udma_timings(drive, speed);
- else
- sis_program_timings(drive, speed);
-}
-
-static u8 sis_ata133_udma_filter(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 regdw = 0;
- u8 drive_pci = sis_ata133_get_base(drive);
-
- pci_read_config_dword(dev, drive_pci, &regdw);
-
-	/* if ATA133 is disabled, we should not set a speed above UDMA5 */
- return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
-}
-
-static int sis_find_family(struct pci_dev *dev)
-{
- struct pci_dev *host;
- int i = 0;
-
- chipset_family = 0;
-
- for (i = 0; i < ARRAY_SIZE(SiSHostChipInfo) && !chipset_family; i++) {
-
- host = pci_get_device(PCI_VENDOR_ID_SI, SiSHostChipInfo[i].host_id, NULL);
-
- if (!host)
- continue;
-
- chipset_family = SiSHostChipInfo[i].chipset_family;
-
-		/* Special case for SiS630: 630S/ET is ATA_100a */
- if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) {
- if (host->revision >= 0x30)
- chipset_family = ATA_100a;
- }
- pci_dev_put(host);
-
- printk(KERN_INFO DRV_NAME " %s: %s %s controller\n",
- pci_name(dev), SiSHostChipInfo[i].name,
- chipset_capability[chipset_family]);
- }
-
- if (!chipset_family) { /* Belongs to pci-quirks */
-
- u32 idemisc;
- u16 trueid;
-
- /* Disable ID masking and register remapping */
- pci_read_config_dword(dev, 0x54, &idemisc);
- pci_write_config_dword(dev, 0x54, (idemisc & 0x7fffffff));
- pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
- pci_write_config_dword(dev, 0x54, idemisc);
-
- if (trueid == 0x5518) {
- printk(KERN_INFO DRV_NAME " %s: SiS 962/963 MuTIOL IDE UDMA133 controller\n",
- pci_name(dev));
- chipset_family = ATA_133;
-
- /* Check for 5513 compatibility mapping
-			 * We must use this, else the port enable code will fail,
- * as it expects the enablebits at 0x4a.
- */
- if ((idemisc & 0x40000000) == 0) {
- pci_write_config_dword(dev, 0x54, idemisc | 0x40000000);
- printk(KERN_INFO DRV_NAME " %s: Switching to 5513 register mapping\n",
- pci_name(dev));
- }
- }
- }
-
- if (!chipset_family) { /* Belongs to pci-quirks */
-
- struct pci_dev *lpc_bridge;
- u16 trueid;
- u8 prefctl;
- u8 idecfg;
-
- pci_read_config_byte(dev, 0x4a, &idecfg);
- pci_write_config_byte(dev, 0x4a, idecfg | 0x10);
- pci_read_config_word(dev, PCI_DEVICE_ID, &trueid);
- pci_write_config_byte(dev, 0x4a, idecfg);
-
-		if (trueid == 0x5517) {	/* SiS 961/961B */
-			u8 lpc_rev = 0;
-
-			lpc_bridge = pci_get_slot(dev->bus, 0x10); /* Bus 0, Dev 2, Fn 0 */
-			if (lpc_bridge)
-				lpc_rev = lpc_bridge->revision;
-			pci_read_config_byte(dev, 0x49, &prefctl);
-			pci_dev_put(lpc_bridge);	/* put only after reading the revision */
-
-			if (lpc_rev == 0x10 && (prefctl & 0x80)) {
- printk(KERN_INFO DRV_NAME " %s: SiS 961B MuTIOL IDE UDMA133 controller\n",
- pci_name(dev));
- chipset_family = ATA_133a;
- } else {
- printk(KERN_INFO DRV_NAME " %s: SiS 961 MuTIOL IDE UDMA100 controller\n",
- pci_name(dev));
- chipset_family = ATA_100;
- }
- }
- }
-
- return chipset_family;
-}
-
-static int init_chipset_sis5513(struct pci_dev *dev)
-{
-	/* General configuration is done here:
-	   1/ tell IDE channels to operate in Compatibility mode only
-	   2/ tell old chips to allow per-drive IDE timings */
-
- u8 reg;
- u16 regw;
-
- switch (chipset_family) {
- case ATA_133:
- /* SiS962 operation mode */
- pci_read_config_word(dev, 0x50, &regw);
- if (regw & 0x08)
- pci_write_config_word(dev, 0x50, regw&0xfff7);
- pci_read_config_word(dev, 0x52, &regw);
- if (regw & 0x08)
- pci_write_config_word(dev, 0x52, regw&0xfff7);
- break;
- case ATA_133a:
- case ATA_100:
- /* Fixup latency */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
- /* Set compatibility bit */
- pci_read_config_byte(dev, 0x49, &reg);
- if (!(reg & 0x01))
- pci_write_config_byte(dev, 0x49, reg|0x01);
- break;
- case ATA_100a:
- case ATA_66:
- /* Fixup latency */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10);
-
- /* On ATA_66 chips the bit was elsewhere */
- pci_read_config_byte(dev, 0x52, &reg);
- if (!(reg & 0x04))
- pci_write_config_byte(dev, 0x52, reg|0x04);
- break;
- case ATA_33:
- /* On ATA_33 we didn't have a single bit to set */
- pci_read_config_byte(dev, 0x09, &reg);
- if ((reg & 0x0f) != 0x00)
- pci_write_config_byte(dev, 0x09, reg&0xf0);
- fallthrough;
- case ATA_16:
- /* force per drive recovery and active timings
- needed on ATA_33 and below chips */
- pci_read_config_byte(dev, 0x52, &reg);
- if (!(reg & 0x08))
- pci_write_config_byte(dev, 0x52, reg|0x08);
- break;
- }
-
- return 0;
-}
-
-struct sis_laptop {
- u16 device;
- u16 subvendor;
- u16 subdevice;
-};
-
-static const struct sis_laptop sis_laptop[] = {
- /* devid, subvendor, subdev */
- { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
- { 0x5513, 0x1734, 0x105f }, /* FSC Amilo A1630 */
- { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
- /* end marker */
- { 0, }
-};
-
-static u8 sis_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- const struct sis_laptop *lap = &sis_laptop[0];
- u8 ata66 = 0;
-
- while (lap->device) {
- if (lap->device == pdev->device &&
- lap->subvendor == pdev->subsystem_vendor &&
- lap->subdevice == pdev->subsystem_device)
- return ATA_CBL_PATA40_SHORT;
- lap++;
- }
-
- if (chipset_family >= ATA_133) {
- u16 regw = 0;
- u16 reg_addr = hwif->channel ? 0x52: 0x50;
- pci_read_config_word(pdev, reg_addr, &regw);
- ata66 = (regw & 0x8000) ? 0 : 1;
- } else if (chipset_family >= ATA_66) {
- u8 reg48h = 0;
- u8 mask = hwif->channel ? 0x20 : 0x10;
- pci_read_config_byte(pdev, 0x48, &reg48h);
- ata66 = (reg48h & mask) ? 0 : 1;
- }
-
- return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops sis_port_ops = {
- .set_pio_mode = sis_set_pio_mode,
- .set_dma_mode = sis_set_dma_mode,
- .cable_detect = sis_cable_detect,
-};
-
-static const struct ide_port_ops sis_ata133_port_ops = {
- .set_pio_mode = sis_set_pio_mode,
- .set_dma_mode = sis_set_dma_mode,
- .udma_filter = sis_ata133_udma_filter,
- .cable_detect = sis_cable_detect,
-};
-
-static const struct ide_port_info sis5513_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_sis5513,
- .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
- .host_flags = IDE_HFLAG_NO_AUTODMA,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = sis5513_chipset;
- u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- if (sis_find_family(dev) == 0)
- return -ENOTSUPP;
-
- if (chipset_family >= ATA_133)
- d.port_ops = &sis_ata133_port_ops;
- else
- d.port_ops = &sis_port_ops;
-
- d.udma_mask = udma_rates[chipset_family];
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static void sis5513_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id sis5513_pci_tbl[] = {
- { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5513), 0 },
- { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5518), 0 },
- { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_1180), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, sis5513_pci_tbl);
-
-static struct pci_driver sis5513_pci_driver = {
- .name = "SIS_IDE",
- .id_table = sis5513_pci_tbl,
- .probe = sis5513_init_one,
- .remove = sis5513_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init sis5513_ide_init(void)
-{
- return ide_pci_register_driver(&sis5513_pci_driver);
-}
-
-static void __exit sis5513_ide_exit(void)
-{
- pci_unregister_driver(&sis5513_pci_driver);
-}
-
-module_init(sis5513_ide_init);
-module_exit(sis5513_ide_exit);
-
-MODULE_AUTHOR("Lionel Bouton, L C Chang, Andre Hedrick, Vojtech Pavlik");
-MODULE_DESCRIPTION("PCI driver module for SIS IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
deleted file mode 100644
index 5c24c420c438..000000000000
--- a/drivers/ide/sl82c105.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SL82C105/Winbond 553 IDE driver
- *
- * Maintainer unknown.
- *
- * Drive tuning added from Rebel.com's kernel sources
- * -- Russell King (15/11/98) linux@arm.linux.org.uk
- *
- * Merge in Russell's HW workarounds, fix various problems
- * with the timing registers setup.
- * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org
- *
- * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "sl82c105"
-
-/*
- * SL82C105 PCI config register 0x40 bits.
- */
-#define CTRL_IDE_IRQB (1 << 30)
-#define CTRL_IDE_IRQA (1 << 28)
-#define CTRL_LEGIRQ (1 << 11)
-#define CTRL_P1F16 (1 << 5)
-#define CTRL_P1EN (1 << 4)
-#define CTRL_P0F16 (1 << 1)
-#define CTRL_P0EN (1 << 0)
-
-/*
- * Convert a PIO mode and cycle time to the required on/off times
- * for the interface. This has protection against runaway timings.
- */
-static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
-{
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- unsigned int cmd_on, cmd_off;
- u8 iordy = 0;
-
- cmd_on = (t->active + 29) / 30;
- cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30;
-
- if (cmd_on == 0)
- cmd_on = 1;
-
- if (cmd_off == 0)
- cmd_off = 1;
-
- if (ide_pio_need_iordy(drive, pio))
- iordy = 0x40;
-
- return (cmd_on - 1) << 8 | (cmd_off - 1) | iordy;
-}
-
-/*
- * Configure the chipset for PIO mode.
- */
-static void sl82c105_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- int reg = 0x44 + drive->dn * 4;
- u16 drv_ctrl;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- drv_ctrl = get_pio_timings(drive, pio);
-
- /*
- * Store the PIO timings so that we can restore them
- * in case DMA will be turned off...
- */
- timings &= 0xffff0000;
- timings |= drv_ctrl;
- ide_set_drivedata(drive, (void *)timings);
-
- pci_write_config_word(dev, reg, drv_ctrl);
- pci_read_config_word (dev, reg, &drv_ctrl);
-
- printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name,
- ide_xfer_verbose(pio + XFER_PIO_0),
- ide_pio_cycle_time(drive, pio), drv_ctrl);
-}
-
-/*
- * Configure the chipset for DMA mode.
- */
-static void sl82c105_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
- unsigned long timings = (unsigned long)ide_get_drivedata(drive);
- u16 drv_ctrl;
- const u8 speed = drive->dma_mode;
-
- drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
-
- /*
- * Store the DMA timings so that we can actually program
- * them when DMA will be turned on...
- */
- timings &= 0x0000ffff;
- timings |= (unsigned long)drv_ctrl << 16;
- ide_set_drivedata(drive, (void *)timings);
-}
-
-static int sl82c105_test_irq(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
-
- pci_read_config_dword(dev, 0x40, &val);
-
- return (val & mask) ? 1 : 0;
-}
-
-/*
- * The SL82C105 holds off all IDE interrupts while in DMA mode until
- * all DMA activity is completed. Sometimes this causes problems (eg,
- * when the drive wants to report an error condition).
- *
- * 0x7e is a "chip testing" register. Bit 2 resets the DMA controller
- * state machine. We need to kick this to work around various bugs.
- */
-static inline void sl82c105_reset_host(struct pci_dev *dev)
-{
- u16 val;
-
- pci_read_config_word(dev, 0x7e, &val);
- pci_write_config_word(dev, 0x7e, val | (1 << 2));
- pci_write_config_word(dev, 0x7e, val & ~(1 << 2));
-}
-
-/*
- * If we get an IRQ timeout, it might be that the DMA state machine
- * got confused. Fix from Todd Inglett. Details from Winbond.
- *
- * This function is called when the IDE timer expires, the drive
- * indicates that it is READY, and we were waiting for DMA to complete.
- */
-static void sl82c105_dma_lost_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
- u8 dma_cmd;
-
- printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n");
-
- /*
- * Check the raw interrupt from the drive.
- */
- pci_read_config_dword(dev, 0x40, &val);
- if (val & mask)
- printk(KERN_INFO "sl82c105: drive was requesting IRQ, "
- "but host lost it\n");
-
- /*
- * Was DMA enabled? If so, disable it - we're resetting the
- * host. The IDE layer will be handling the drive for us.
- */
- dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
- if (dma_cmd & 1) {
- outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
- printk(KERN_INFO "sl82c105: DMA was enabled\n");
- }
-
- sl82c105_reset_host(dev);
-}
-
-/*
- * ATAPI devices can cause the SL82C105 DMA state machine to go gaga.
- * Winbond recommend that the DMA state machine is reset prior to
- * setting the bus master DMA enable bit.
- *
- * The generic IDE core will have disabled the BMEN bit before this
- * function is called.
- */
-static void sl82c105_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int reg = 0x44 + drive->dn * 4;
-
- pci_write_config_word(dev, reg,
- (unsigned long)ide_get_drivedata(drive) >> 16);
-
- sl82c105_reset_host(dev);
- ide_dma_start(drive);
-}
-
-static void sl82c105_dma_clear(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
-
- sl82c105_reset_host(dev);
-}
-
-static int sl82c105_dma_end(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int reg = 0x44 + drive->dn * 4;
- int ret = ide_dma_end(drive);
-
- pci_write_config_word(dev, reg,
- (unsigned long)ide_get_drivedata(drive));
-
- return ret;
-}
-
-/*
- * ATA reset will clear the 16 bits mode in the control
- * register, we need to reprogram it
- */
-static void sl82c105_resetproc(ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u32 val;
-
- pci_read_config_dword(dev, 0x40, &val);
- val |= (CTRL_P1F16 | CTRL_P0F16);
- pci_write_config_dword(dev, 0x40, val);
-}
-
-/*
- * Return the revision of the Winbond bridge
- * which this function is part of.
- */
-static u8 sl82c105_bridge_revision(struct pci_dev *dev)
-{
-	struct pci_dev *bridge;
-	u8 rev;
-
- /*
- * The bridge should be part of the same device, but function 0.
- */
- bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
- dev->bus->number,
- PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
- if (!bridge)
- return -1;
-
- /*
- * Make sure it is a Winbond 553 and is an ISA bridge.
- */
- if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
- bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
- bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
- pci_dev_put(bridge);
- return -1;
- }
-	/*
-	 * We need function 0's revision, not function 1's; read it
-	 * before dropping the reference.
-	 */
-	rev = bridge->revision;
-	pci_dev_put(bridge);
-
-	return rev;
-}
-
-/*
- * Enable the PCI device
- *
- * --BenH: It's arch fixup code that should enable channels that
- * have not been enabled by firmware. I decided we can still enable
- * channel 0 here at least, but channel 1 has to be enabled by
- * firmware or arch code. We still set both to 16 bits mode.
- */
-static int init_chipset_sl82c105(struct pci_dev *dev)
-{
- u32 val;
-
- pci_read_config_dword(dev, 0x40, &val);
- val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
- pci_write_config_dword(dev, 0x40, val);
-
- return 0;
-}
-
-static const struct ide_port_ops sl82c105_port_ops = {
- .set_pio_mode = sl82c105_set_pio_mode,
- .set_dma_mode = sl82c105_set_dma_mode,
- .resetproc = sl82c105_resetproc,
- .test_irq = sl82c105_test_irq,
-};
-
-static const struct ide_dma_ops sl82c105_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = sl82c105_dma_start,
- .dma_end = sl82c105_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = sl82c105_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_clear = sl82c105_dma_clear,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info sl82c105_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_sl82c105,
- .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
- .port_ops = &sl82c105_port_ops,
- .dma_ops = &sl82c105_dma_ops,
- .host_flags = IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_UNMASK_IRQS |
- IDE_HFLAG_SERIALIZE_DMA |
- IDE_HFLAG_NO_AUTODMA,
- .pio_mask = ATA_PIO5,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct ide_port_info d = sl82c105_chipset;
- u8 rev = sl82c105_bridge_revision(dev);
-
- if (rev <= 5) {
- /*
- * Never ever EVER under any circumstances enable
- * DMA when the bridge is this old.
- */
- printk(KERN_INFO DRV_NAME ": Winbond W83C553 bridge "
- "revision %d, BM-DMA disabled\n", rev);
- d.dma_ops = NULL;
- d.mwdma_mask = 0;
- d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
- }
-
- return ide_pci_init_one(dev, &d, NULL);
-}
-
-static const struct pci_device_id sl82c105_pci_tbl[] = {
- { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, sl82c105_pci_tbl);
-
-static struct pci_driver sl82c105_pci_driver = {
- .name = "W82C105_IDE",
- .id_table = sl82c105_pci_tbl,
- .probe = sl82c105_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init sl82c105_ide_init(void)
-{
- return ide_pci_register_driver(&sl82c105_pci_driver);
-}
-
-static void __exit sl82c105_ide_exit(void)
-{
- pci_unregister_driver(&sl82c105_pci_driver);
-}
-
-module_init(sl82c105_ide_init);
-module_exit(sl82c105_ide_exit);
-
-MODULE_DESCRIPTION("PCI driver module for W82C105 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
deleted file mode 100644
index f521d5ebf916..000000000000
--- a/drivers/ide/slc90e66.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
- *
- * This is a look-alike variation of the ICH0 PIIX4 Ultra-66,
- * but this one keeps the ISA bridge and slots alive.
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "slc90e66"
-
-static DEFINE_SPINLOCK(slc90e66_lock);
-
-static void slc90e66_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- int is_slave = drive->dn & 1;
- int master_port = hwif->channel ? 0x42 : 0x40;
- int slave_port = 0x44;
- unsigned long flags;
- u16 master_data;
- u8 slave_data;
- int control = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- /* ISP RTC */
- static const u8 timings[][2] = {
- { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- spin_lock_irqsave(&slc90e66_lock, flags);
- pci_read_config_word(dev, master_port, &master_data);
-
- if (pio > 1)
- control |= 1; /* Programmable timing on */
- if (drive->media == ide_disk)
- control |= 4; /* Prefetch, post write */
- if (ide_pio_need_iordy(drive, pio))
- control |= 2; /* IORDY */
- if (is_slave) {
- master_data |= 0x4000;
- master_data &= ~0x0070;
- if (pio > 1) {
- /* Set PPE, IE and TIME */
- master_data |= control << 4;
- }
- pci_read_config_byte(dev, slave_port, &slave_data);
- slave_data &= hwif->channel ? 0x0f : 0xf0;
- slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
- (hwif->channel ? 4 : 0);
- } else {
- master_data &= ~0x3307;
- if (pio > 1) {
- /* enable PPE, IE and TIME */
- master_data |= control;
- }
- master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
- }
- pci_write_config_word(dev, master_port, master_data);
- if (is_slave)
- pci_write_config_byte(dev, slave_port, slave_data);
- spin_unlock_irqrestore(&slc90e66_lock, flags);
-}
-
-static void slc90e66_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 maslave = hwif->channel ? 0x42 : 0x40;
- int sitre = 0, a_speed = 7 << (drive->dn * 4);
- int u_speed = 0, u_flag = 1 << drive->dn;
- u16 reg4042, reg44, reg48, reg4a;
- const u8 speed = drive->dma_mode;
-
- pci_read_config_word(dev, maslave, &reg4042);
- sitre = (reg4042 & 0x4000) ? 1 : 0;
- pci_read_config_word(dev, 0x44, &reg44);
- pci_read_config_word(dev, 0x48, &reg48);
- pci_read_config_word(dev, 0x4a, &reg4a);
-
- if (speed >= XFER_UDMA_0) {
- u_speed = (speed - XFER_UDMA_0) << (drive->dn * 4);
-
- if (!(reg48 & u_flag))
- pci_write_config_word(dev, 0x48, reg48|u_flag);
- if ((reg4a & a_speed) != u_speed) {
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- pci_read_config_word(dev, 0x4a, &reg4a);
- pci_write_config_word(dev, 0x4a, reg4a|u_speed);
- }
- } else {
- const u8 mwdma_to_pio[] = { 0, 3, 4 };
-
- if (reg48 & u_flag)
- pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
-
- if (speed >= XFER_MW_DMA_0)
- drive->pio_mode =
- mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- else
- drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
-
- slc90e66_set_pio_mode(hwif, drive);
- }
-}
-
-static u8 slc90e66_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 reg47 = 0, mask = hwif->channel ? 0x01 : 0x02;
-
- pci_read_config_byte(dev, 0x47, &reg47);
-
-	/* bit 1 (primary) / bit 0 (secondary): 0=80-wire, 1=40-wire cable */
- return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static const struct ide_port_ops slc90e66_port_ops = {
- .set_pio_mode = slc90e66_set_pio_mode,
- .set_dma_mode = slc90e66_set_dma_mode,
- .cable_detect = slc90e66_cable_detect,
-};
-
-static const struct ide_port_info slc90e66_chipset = {
- .name = DRV_NAME,
- .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
- .port_ops = &slc90e66_port_ops,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2_ONLY,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA4,
-};
-
-static int slc90e66_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &slc90e66_chipset, NULL);
-}
-
-static const struct pci_device_id slc90e66_pci_tbl[] = {
- { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_1), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, slc90e66_pci_tbl);
-
-static struct pci_driver slc90e66_pci_driver = {
- .name = "SLC90e66_IDE",
- .id_table = slc90e66_pci_tbl,
- .probe = slc90e66_init_one,
- .remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init slc90e66_ide_init(void)
-{
- return ide_pci_register_driver(&slc90e66_pci_driver);
-}
-
-static void __exit slc90e66_ide_exit(void)
-{
- pci_unregister_driver(&slc90e66_pci_driver);
-}
-
-module_init(slc90e66_ide_init);
-module_exit(slc90e66_ide_exit);
-
-MODULE_AUTHOR("Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for SLC90E66 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
deleted file mode 100644
index 17e6132b99bf..000000000000
--- a/drivers/ide/tc86c001.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (C) 2002 Toshiba Corporation
- * Copyright (C) 2005-2006 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/module.h>
-
-#define DRV_NAME "tc86c001"
-
-static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
- u16 mode, scr = inw(scr_port);
- const u8 speed = drive->dma_mode;
-
- switch (speed) {
- case XFER_UDMA_4: mode = 0x00c0; break;
- case XFER_UDMA_3: mode = 0x00b0; break;
- case XFER_UDMA_2: mode = 0x00a0; break;
- case XFER_UDMA_1: mode = 0x0090; break;
- case XFER_UDMA_0: mode = 0x0080; break;
- case XFER_MW_DMA_2: mode = 0x0070; break;
- case XFER_MW_DMA_1: mode = 0x0060; break;
- case XFER_MW_DMA_0: mode = 0x0050; break;
- case XFER_PIO_4: mode = 0x0400; break;
- case XFER_PIO_3: mode = 0x0300; break;
- case XFER_PIO_2: mode = 0x0200; break;
- case XFER_PIO_1: mode = 0x0100; break;
- case XFER_PIO_0:
- default: mode = 0x0000; break;
- }
-
- scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
- scr |= mode;
- outw(scr, scr_port);
-}
-
-static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- tc86c001_set_mode(hwif, drive);
-}
-
-/*
- * HACKITY HACK
- *
- * This is a workaround for the limitation 5 of the TC86C001 IDE controller:
- * if a DMA transfer terminates prematurely, the controller leaves the device's
- * interrupt request (INTRQ) pending and does not generate a PCI interrupt (or
- * set the interrupt bit in the DMA status register), thus no PCI interrupt
- * will occur until a DMA transfer has been successfully completed.
- *
- * We work around this by initiating a dummy, zero-length DMA transfer on
- * DMA timeout expiry. I found no better way to do this with the current
- * IDE core than to temporarily replace a higher-level driver's timer expiry
- * handler with our own, backing up to that handler in case our recovery fails.
- */
-static int tc86c001_timer_expiry(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_expiry_t *expiry = ide_get_hwifdata(hwif);
- u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
-
- /* Restore a higher level driver's expiry handler first. */
- hwif->expiry = expiry;
-
- if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
- unsigned long sc_base = hwif->config_data;
- unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-
- printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
- "attempting recovery...\n", drive->name);
-
- /* Stop DMA */
- outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
-
- /* Setup the dummy DMA transfer */
- outw(0, sc_base + 0x0a); /* Sector Count */
- outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
-
- /* Start the dummy DMA transfer */
-
- /* clear R_OR_WCTR for write */
- outb(0x00, hwif->dma_base + ATA_DMA_CMD);
- /* set START_STOPBM */
- outb(0x01, hwif->dma_base + ATA_DMA_CMD);
-
- /*
-		 * If an interrupt was pending, it should come through shortly.
- * If not, a higher level driver's expiry handler should
- * eventually cause some kind of recovery from the DMA stall.
- */
- return WAIT_MIN_SLEEP;
- }
-
- /* Chain to the restored expiry handler if DMA wasn't active. */
- if (likely(expiry != NULL))
- return expiry(drive);
-
- /* If there was no handler, "emulate" that for ide_timer_expiry()... */
- return -1;
-}
-
-static void tc86c001_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long sc_base = hwif->config_data;
- unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- unsigned long nsectors = blk_rq_sectors(hwif->rq);
-
- /*
- * We have to manually load the sector count and size into
- * the appropriate system control registers for DMA to work
- * with LBA48 and ATAPI devices...
- */
- outw(nsectors, sc_base + 0x0a); /* Sector Count */
- outw(SECTOR_SIZE / 2, twcr_port); /* Transfer Word Count 1/2 */
-
- /* Install our timeout expiry hook, saving the current handler... */
- ide_set_hwifdata(hwif, hwif->expiry);
- hwif->expiry = &tc86c001_timer_expiry;
-
- ide_dma_start(drive);
-}
-
-static u8 tc86c001_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long sc_base = pci_resource_start(dev, 5);
- u16 scr1 = inw(sc_base + 0x00);
-
- /*
- * System Control 1 Register bit 13 (PDIAGN):
- * 0=80-pin cable, 1=40-pin cable
- */
- return (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-static void init_hwif_tc86c001(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long sc_base = pci_resource_start(dev, 5);
- u16 scr1 = inw(sc_base + 0x00);
-
- /* System Control 1 Register bit 15 (Soft Reset) set */
- outw(scr1 | 0x8000, sc_base + 0x00);
-
- /* System Control 1 Register bit 14 (FIFO Reset) set */
- outw(scr1 | 0x4000, sc_base + 0x00);
-
- /* System Control 1 Register: reset clear */
- outw(scr1 & ~0xc000, sc_base + 0x00);
-
- /* Store the system control register base for convenience... */
- hwif->config_data = sc_base;
-
- if (!hwif->dma_base)
- return;
-
- /*
- * Sector Count Control Register bits 0 and 1 set:
- * software sets Sector Count Register for master and slave device
- */
- outw(0x0003, sc_base + 0x0c);
-
- /* Sector Count Register limit */
- hwif->rqsize = 0xffff;
-}
-
-static const struct ide_port_ops tc86c001_port_ops = {
- .set_pio_mode = tc86c001_set_pio_mode,
- .set_dma_mode = tc86c001_set_mode,
- .cable_detect = tc86c001_cable_detect,
-};
-
-static const struct ide_dma_ops tc86c001_dma_ops = {
- .dma_host_set = ide_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_start = tc86c001_dma_start,
- .dma_end = ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info tc86c001_chipset = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_tc86c001,
- .port_ops = &tc86c001_port_ops,
- .dma_ops = &tc86c001_dma_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA4,
-};
-
-static int tc86c001_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- int rc;
-
- rc = pci_enable_device(dev);
- if (rc)
- goto out;
-
- rc = pci_request_region(dev, 5, DRV_NAME);
- if (rc) {
-		printk(KERN_ERR DRV_NAME ": system control regs already in use\n");
- goto out_disable;
- }
-
- rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL);
- if (rc)
- goto out_release;
-
- goto out;
-
-out_release:
- pci_release_region(dev, 5);
-out_disable:
- pci_disable_device(dev);
-out:
- return rc;
-}
-
-static void tc86c001_remove(struct pci_dev *dev)
-{
- ide_pci_remove(dev);
- pci_release_region(dev, 5);
- pci_disable_device(dev);
-}
-
-static const struct pci_device_id tc86c001_pci_tbl[] = {
- { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE), 0 },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl);
-
-static struct pci_driver tc86c001_pci_driver = {
- .name = "TC86C001",
- .id_table = tc86c001_pci_tbl,
- .probe = tc86c001_init_one,
- .remove = tc86c001_remove,
-};
-
-static int __init tc86c001_ide_init(void)
-{
- return ide_pci_register_driver(&tc86c001_pci_driver);
-}
-
-static void __exit tc86c001_ide_exit(void)
-{
- pci_unregister_driver(&tc86c001_pci_driver);
-}
-
-module_init(tc86c001_ide_init);
-module_exit(tc86c001_ide_exit);
-
-MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
-MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
deleted file mode 100644
index 16ddd0956832..000000000000
--- a/drivers/ide/triflex.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * IDE Chipset driver for the Compaq TriFlex IDE controller.
- *
- * Known to work with the Compaq Workstation 5x00 series.
- *
- * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
- * Author: Torben Mathiasen <torben.mathiasen@hp.com>
- *
- * Loosely based on the piix & svwks drivers.
- *
- * Documentation:
- * Not publicly available.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define DRV_NAME "triflex"
-
-static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- u32 triflex_timings = 0;
- u16 timing = 0;
- u8 channel_offset = hwif->channel ? 0x74 : 0x70, unit = drive->dn & 1;
-
- pci_read_config_dword(dev, channel_offset, &triflex_timings);
-
- switch (drive->dma_mode) {
- case XFER_MW_DMA_2:
- timing = 0x0103;
- break;
- case XFER_MW_DMA_1:
- timing = 0x0203;
- break;
- case XFER_MW_DMA_0:
- timing = 0x0808;
- break;
- case XFER_SW_DMA_2:
- case XFER_SW_DMA_1:
- case XFER_SW_DMA_0:
- timing = 0x0f0f;
- break;
- case XFER_PIO_4:
- timing = 0x0202;
- break;
- case XFER_PIO_3:
- timing = 0x0204;
- break;
- case XFER_PIO_2:
- timing = 0x0404;
- break;
- case XFER_PIO_1:
- timing = 0x0508;
- break;
- case XFER_PIO_0:
- timing = 0x0808;
- break;
- }
-
- triflex_timings &= ~(0xFFFF << (16 * unit));
- triflex_timings |= (timing << (16 * unit));
-
- pci_write_config_dword(dev, channel_offset, triflex_timings);
-}
-
-static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- triflex_set_mode(hwif, drive);
-}
-
-static const struct ide_port_ops triflex_port_ops = {
- .set_pio_mode = triflex_set_pio_mode,
- .set_dma_mode = triflex_set_mode,
-};
-
-static const struct ide_port_info triflex_device = {
- .name = DRV_NAME,
- .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
- .port_ops = &triflex_port_ops,
- .pio_mask = ATA_PIO4,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &triflex_device, NULL);
-}
-
-static const struct pci_device_id triflex_pci_tbl[] = {
- { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
-
-#ifdef CONFIG_PM
-static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- /*
- * We must not disable or powerdown the device.
- * APM bios refuses to suspend if IDE is not accessible.
- */
- pci_save_state(dev);
- return 0;
-}
-#else
-#define triflex_ide_pci_suspend NULL
-#endif
-
-static struct pci_driver triflex_pci_driver = {
- .name = "TRIFLEX_IDE",
- .id_table = triflex_pci_tbl,
- .probe = triflex_init_one,
- .remove = ide_pci_remove,
- .suspend = triflex_ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init triflex_ide_init(void)
-{
- return ide_pci_register_driver(&triflex_pci_driver);
-}
-
-static void __exit triflex_ide_exit(void)
-{
- pci_unregister_driver(&triflex_pci_driver);
-}
-
-module_init(triflex_ide_init);
-module_exit(triflex_ide_exit);
-
-MODULE_AUTHOR("Torben Mathiasen");
-MODULE_DESCRIPTION("PCI driver module for Compaq Triflex IDE");
-MODULE_LICENSE("GPL");
-
-
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
deleted file mode 100644
index d550b379b0f1..000000000000
--- a/drivers/ide/trm290.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (c) 1997-1998 Mark Lord
- * Copyright (c) 2007 MontaVista Software, Inc. <source@mvista.com>
- *
- * May be copied or modified under the terms of the GNU General Public License
- *
- * June 22, 2004 - get rid of check_region
- * - Jesper Juhl
- *
- */
-
-/*
- * This module provides support for the bus-master IDE DMA function
- * of the Tekram TRM290 chip, used on a variety of PCI IDE add-on boards,
- * including a "Precision Instruments" board. The TRM290 pre-dates
- * the sff-8038 standard (ide-dma.c) by a few months, and differs
- * significantly enough to warrant separate routines for some functions,
- * while re-using others from ide-dma.c.
- *
- * EXPERIMENTAL! It works for me (a sample of one).
- *
- * Works reliably for me in DMA mode (READs only),
- * DMA WRITEs are disabled by default (see #define below);
- *
- * DMA is not enabled automatically for this chipset,
- * but can be turned on manually (with "hdparm -d1") at run time.
- *
- * I need volunteers with "spare" drives for further testing
- * and development, and maybe to help figure out the peculiarities.
- * Even knowing the registers (below), some things behave strangely.
- */
-
-#define TRM290_NO_DMA_WRITES /* DMA writes seem unreliable sometimes */
-
-/*
- * TRM-290 PCI-IDE2 Bus Master Chip
- * ================================
- * The configuration registers are addressed in normal I/O port space
- * and are used as follows:
- *
- * trm290_base depends on jumper settings, and is probed for by ide-dma.c
- *
- * trm290_base+2 when WRITTEN: chiptest register (byte, write-only)
- * bit7 must always be written as "1"
- * bits6-2 undefined
- * bit1 1=legacy_compatible_mode, 0=native_pci_mode
- * bit0 1=test_mode, 0=normal(default)
- *
- * trm290_base+2 when READ: status register (byte, read-only)
- * bits7-2 undefined
- * bit1 channel0 busmaster interrupt status 0=none, 1=asserted
- * bit0 channel0 interrupt status 0=none, 1=asserted
- *
- * trm290_base+3 Interrupt mask register
- * bits7-5 undefined
- * bit4 legacy_header: 1=present, 0=absent
- * bit3 channel1 busmaster interrupt status 0=none, 1=asserted (read only)
- * bit2 channel1 interrupt status 0=none, 1=asserted (read only)
- * bit1 channel1 interrupt mask: 1=masked, 0=unmasked(default)
- * bit0 channel0 interrupt mask: 1=masked, 0=unmasked(default)
- *
- * trm290_base+1 "CPR" Config Pointer Register (byte)
- * bit7 1=autoincrement CPR bits 2-0 after each access of CDR
- * bit6 1=min. 1 wait-state posted write cycle (default), 0=0 wait-state
- * bit5 0=enabled master burst access (default), 1=disable (write only)
- * bit4 PCI DEVSEL# timing select: 1=medium(default), 0=fast
- * bit3 0=primary IDE channel, 1=secondary IDE channel
- * bits2-0 register index for accesses through CDR port
- *
- * trm290_base+0 "CDR" Config Data Register (word)
- * two sets of seven config registers,
- * selected by CPR bit 3 (channel) and CPR bits 2-0 (index 0 to 6),
- * each index defined below:
- *
- * Index-0 Base address register for command block (word)
- * defaults: 0x1f0 for primary, 0x170 for secondary
- *
- * Index-1 general config register (byte)
- * bit7 1=DMA enable, 0=DMA disable
- * bit6 1=activate IDE_RESET, 0=no action (default)
- * bit5 1=enable IORDY, 0=disable IORDY (default)
- * bit4 0=16-bit data port(default), 1=8-bit (XT) data port
- * bit3 interrupt polarity: 1=active_low, 0=active_high(default)
- * bit2 power-saving-mode(?): 1=enable, 0=disable(default) (write only)
- * bit1 bus_master_mode(?): 1=enable, 0=disable(default)
- * bit0 enable_io_ports: 1=enable(default), 0=disable
- *
- * Index-2 read-ahead counter preload bits 0-7 (byte, write only)
- * bits7-0 bits7-0 of readahead count
- *
- * Index-3 read-ahead config register (byte, write only)
- * bit7 1=enable_readahead, 0=disable_readahead(default)
- * bit6 1=clear_FIFO, 0=no_action
- * bit5 undefined
- * bit4 mode4 timing control: 1=enable, 0=disable(default)
- * bit3 undefined
- * bit2 undefined
- * bits1-0 bits9-8 of read-ahead count
- *
- * Index-4 base address register for control block (word)
- * defaults: 0x3f6 for primary, 0x376 for secondary
- *
- * Index-5 data port timings (shared by both drives) (byte)
- * standard PCI "clk" (clock) counts, default value = 0xf5
- *
- * bits7-6 setup time: 00=1clk, 01=2clk, 10=3clk, 11=4clk
- * bits5-3 hold time: 000=1clk, 001=2clk, 010=3clk,
- * 011=4clk, 100=5clk, 101=6clk,
- * 110=8clk, 111=12clk
- * bits2-0 active time: 000=2clk, 001=3clk, 010=4clk,
- * 011=5clk, 100=6clk, 101=8clk,
- * 110=12clk, 111=16clk
- *
- * Index-6 command/control port timings (shared by both drives) (byte)
- * same layout as Index-5, default value = 0xde
- *
- * Suggested CDR programming for PIO mode0 (600ns):
- * 0x01f0,0x21,0xff,0x80,0x03f6,0xf5,0xde ; primary
- * 0x0170,0x21,0xff,0x80,0x0376,0xf5,0xde ; secondary
- *
- * Suggested CDR programming for PIO mode3 (180ns):
- * 0x01f0,0x21,0xff,0x80,0x03f6,0x09,0xde ; primary
- * 0x0170,0x21,0xff,0x80,0x0376,0x09,0xde ; secondary
- *
- * Suggested CDR programming for PIO mode4 (120ns):
- * 0x01f0,0x21,0xff,0x80,0x03f6,0x00,0xde ; primary
- * 0x0170,0x21,0xff,0x80,0x0376,0x00,0xde ; secondary
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "trm290"
-
-static void trm290_prepare_drive (ide_drive_t *drive, unsigned int use_dma)
-{
- ide_hwif_t *hwif = drive->hwif;
- u16 reg = 0;
- unsigned long flags;
-
- /* select PIO or DMA */
- reg = use_dma ? (0x21 | 0x82) : (0x21 & ~0x82);
-
- local_irq_save(flags);
-
- if (reg != hwif->select_data) {
- hwif->select_data = reg;
- /* set PIO/DMA */
- outb(0x51 | (hwif->channel << 3), hwif->config_data + 1);
- outw(reg & 0xff, hwif->config_data);
- }
-
- /* enable IRQ if not probing */
- if (drive->dev_flags & IDE_DFLAG_PRESENT) {
- reg = inw(hwif->config_data + 3);
- reg &= 0x13;
- reg &= ~(1 << hwif->channel);
- outw(reg, hwif->config_data + 3);
- }
-
- local_irq_restore(flags);
-}
-
-static void trm290_dev_select(ide_drive_t *drive)
-{
- trm290_prepare_drive(drive, !!(drive->dev_flags & IDE_DFLAG_USING_DMA));
-
- outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr);
-}
-
-static int trm290_dma_check(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- if (cmd->tf_flags & IDE_TFLAG_WRITE) {
-#ifdef TRM290_NO_DMA_WRITES
- /* always use PIO for writes */
- return 1;
-#endif
- }
- return 0;
-}
-
-static int trm290_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int count, rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 2;
-
- count = ide_build_dmatable(drive, cmd);
- if (count == 0)
- /* try PIO instead of DMA */
- return 1;
-
- outl(hwif->dmatable_dma | rw, hwif->dma_base);
- /* start DMA */
- outw(count * 2 - 1, hwif->dma_base + 2);
-
- return 0;
-}
-
-static void trm290_dma_start(ide_drive_t *drive)
-{
- trm290_prepare_drive(drive, 1);
-}
-
-static int trm290_dma_end(ide_drive_t *drive)
-{
- u16 status = inw(drive->hwif->dma_base + 2);
-
- trm290_prepare_drive(drive, 0);
-
- return status != 0x00ff;
-}
-
-static int trm290_dma_test_irq(ide_drive_t *drive)
-{
- u16 status = inw(drive->hwif->dma_base + 2);
-
- return status == 0x00ff;
-}
-
-static void trm290_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void init_hwif_trm290(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int cfg_base = pci_resource_start(dev, 4);
- unsigned long flags;
- u8 reg = 0;
-
- if ((dev->class & 5) && cfg_base)
- printk(KERN_INFO DRV_NAME " %s: chip", pci_name(dev));
- else {
- cfg_base = 0x3df0;
- printk(KERN_INFO DRV_NAME " %s: using default", pci_name(dev));
- }
- printk(KERN_CONT " config base at 0x%04x\n", cfg_base);
- hwif->config_data = cfg_base;
- hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, hwif->dma_base, hwif->dma_base + 3);
-
- if (ide_allocate_dma_engine(hwif))
- return;
-
- local_irq_save(flags);
- /* put config reg into first byte of hwif->select_data */
- outb(0x51 | (hwif->channel << 3), hwif->config_data + 1);
- /* select PIO as default */
- hwif->select_data = 0x21;
- outb(hwif->select_data, hwif->config_data);
- /* get IRQ info */
- reg = inb(hwif->config_data + 3);
- /* mask IRQs for both ports */
- reg = (reg & 0x10) | 0x03;
- outb(reg, hwif->config_data + 3);
- local_irq_restore(flags);
-
- if (reg & 0x10)
- /* legacy mode */
- hwif->irq = hwif->channel ? 15 : 14;
-
-#if 1
- {
- /*
- * My trm290-based card doesn't seem to work with all possible values
- * for the control basereg, so this kludge ensures that we use only
- * values that are known to work. Ugh. -ml
- */
- u16 new, old, compat = hwif->channel ? 0x374 : 0x3f4;
- static u16 next_offset = 0;
- u8 old_mask;
-
- outb(0x54 | (hwif->channel << 3), hwif->config_data + 1);
- old = inw(hwif->config_data);
- old &= ~1;
- old_mask = inb(old + 2);
- if (old != compat && old_mask == 0xff) {
- /* leave lower 10 bits untouched */
- compat += (next_offset += 0x400);
- hwif->io_ports.ctl_addr = compat + 2;
- outw(compat | 1, hwif->config_data);
- new = inw(hwif->config_data);
- printk(KERN_INFO "%s: control basereg workaround: "
- "old=0x%04x, new=0x%04x\n",
- hwif->name, old, new & ~1);
- }
- }
-#endif
-}
-
-static const struct ide_tp_ops trm290_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = trm290_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_dma_ops trm290_dma_ops = {
- .dma_host_set = trm290_dma_host_set,
- .dma_setup = trm290_dma_setup,
- .dma_start = trm290_dma_start,
- .dma_end = trm290_dma_end,
- .dma_test_irq = trm290_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_check = trm290_dma_check,
-};
-
-static const struct ide_port_info trm290_chipset = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_trm290,
- .tp_ops = &trm290_tp_ops,
- .dma_ops = &trm290_dma_ops,
- .host_flags = IDE_HFLAG_TRM290 |
- IDE_HFLAG_NO_ATAPI_DMA |
-#if 0 /* play it safe for now */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA |
-#endif
- IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_NO_LBA48,
-};
-
-static int trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- return ide_pci_init_one(dev, &trm290_chipset, NULL);
-}
-
-static const struct pci_device_id trm290_pci_tbl[] = {
- { PCI_VDEVICE(TEKRAM, PCI_DEVICE_ID_TEKRAM_DC290), 0 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, trm290_pci_tbl);
-
-static struct pci_driver trm290_pci_driver = {
- .name = "TRM290_IDE",
- .id_table = trm290_pci_tbl,
- .probe = trm290_init_one,
- .remove = ide_pci_remove,
-};
-
-static int __init trm290_ide_init(void)
-{
- return ide_pci_register_driver(&trm290_pci_driver);
-}
-
-static void __exit trm290_ide_exit(void)
-{
- pci_unregister_driver(&trm290_pci_driver);
-}
-
-module_init(trm290_ide_init);
-module_exit(trm290_ide_exit);
-
-MODULE_AUTHOR("Mark Lord");
-MODULE_DESCRIPTION("PCI driver module for Tekram TRM290 IDE");
-MODULE_LICENSE("GPL");
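The register map spelled out in the removed trm290.c header comment ends with suggested CDR values for PIO modes 0, 3 and 4. As a rough illustration of how those seven per-channel registers would be loaded through the CPR/CDR pair, here is a minimal sketch; the function name, the fixed PIO mode 0 table and the bare outb()/outw() accesses are assumptions for illustration, not code taken from the driver.

#include <linux/types.h>
#include <linux/io.h>

/*
 * Sketch only: load the primary channel's seven CDR registers with the
 * PIO mode 0 values suggested in the trm290.c comment block above.
 * Index 0 and index 4 are the word-wide command/control block base
 * addresses; the rest are byte-wide but are written as words, which is
 * how the removed driver itself accessed the CDR port.
 */
static void trm290_cdr_pio0_primary(unsigned long trm290_base)
{
	static const u16 cdr_pio0[7] = {
		0x01f0, 0x21, 0xff, 0x80, 0x03f6, 0xf5, 0xde
	};
	int i;

	for (i = 0; i < 7; i++) {
		/* CPR (base+1): bit3=0 selects the primary channel, bits 2-0 pick the index */
		outb(i, trm290_base + 1);
		/* CDR (base+0): write the suggested value for that index */
		outw(cdr_pio0[i], trm290_base);
	}
}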
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
deleted file mode 100644
index 962eb92501b5..000000000000
--- a/drivers/ide/tx4938ide.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * TX4938 internal IDE driver
- * Based on tx4939ide.c.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2007
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/ide.h>
-#include <asm/txx9/tx4938.h>
-
-static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
- unsigned int gbus_clock,
- u8 pio)
-{
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- u64 cr = __raw_readq(&tx4938_ebuscptr->cr[ebus_ch]);
- unsigned int sp = (cr >> 4) & 3;
- unsigned int clock = gbus_clock / (4 - sp);
- unsigned int cycle = 1000000000 / clock;
- unsigned int shwt;
- int wt;
-
- /* Minimum DIOx- active time */
- wt = DIV_ROUND_UP(t->act8b, cycle) - 2;
- /* IORDY setup time: 35ns */
- wt = max_t(int, wt, DIV_ROUND_UP(35, cycle));
- /* actual wait-cycle is max(wt & ~1, 1) */
- if (wt > 2 && (wt & 1))
- wt++;
- wt &= ~1;
- /* Address-valid to DIOR/DIOW setup */
- shwt = DIV_ROUND_UP(t->setup, cycle);
-
- /* -DIOx recovery time (SHWT * 4) and cycle time requirement */
- while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
- shwt++;
- if (shwt > 7) {
- pr_warn("tx4938ide: SHWT violation (%d)\n", shwt);
- shwt = 7;
- }
- pr_debug("tx4938ide: ebus %d, bus cycle %dns, WT %d, SHWT %d\n",
- ebus_ch, cycle, wt, shwt);
-
- __raw_writeq((cr & ~0x3f007ull) | (wt << 12) | shwt,
- &tx4938_ebuscptr->cr[ebus_ch]);
-}
-
-static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct tx4938ide_platform_info *pdata = dev_get_platdata(hwif->dev);
- u8 safe = drive->pio_mode - XFER_PIO_0;
- ide_drive_t *pair;
-
- pair = ide_get_pair_dev(drive);
- if (pair)
- safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
- tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
-}
-
-#ifdef __BIG_ENDIAN
-
-/* custom iops (independent from SWAP_IO_SPACE) */
-static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--)
- *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static void tx4938ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--) {
- __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
- ptr++;
- }
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static const struct ide_tp_ops tx4938ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = tx4938ide_input_data_swap,
- .output_data = tx4938ide_output_data_swap,
-};
-
-#endif /* __BIG_ENDIAN */
-
-static const struct ide_port_ops tx4938ide_port_ops = {
- .set_pio_mode = tx4938ide_set_pio_mode,
-};
-
-static const struct ide_port_info tx4938ide_port_info __initconst = {
- .port_ops = &tx4938ide_port_ops,
-#ifdef __BIG_ENDIAN
- .tp_ops = &tx4938ide_tp_ops,
-#endif
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO5,
- .chipset = ide_generic,
-};
-
-static int __init tx4938ide_probe(struct platform_device *pdev)
-{
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_host *host;
- struct resource *res;
- struct tx4938ide_platform_info *pdata = dev_get_platdata(&pdev->dev);
- int irq, ret, i;
- unsigned long mapbase, mapctl;
- struct ide_port_info d = tx4938ide_port_info;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), "tx4938ide"))
- return -EBUSY;
- mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
- 8 << pdata->ioport_shift);
- mapctl = (unsigned long)devm_ioremap(&pdev->dev,
- res->start + 0x10000 +
- (6 << pdata->ioport_shift),
- 1 << pdata->ioport_shift);
- if (!mapbase || !mapctl)
- return -EBUSY;
-
- memset(&hw, 0, sizeof(hw));
- if (pdata->ioport_shift) {
- unsigned long port = mapbase;
- unsigned long ctl = mapctl;
-
- hw.io_ports_array[0] = port;
-#ifdef __BIG_ENDIAN
- port++;
- ctl++;
-#endif
- for (i = 1; i <= 7; i++)
- hw.io_ports_array[i] =
- port + (i << pdata->ioport_shift);
- hw.io_ports.ctl_addr = ctl;
- } else
- ide_std_init_ports(&hw, mapbase, mapctl);
- hw.irq = irq;
- hw.dev = &pdev->dev;
-
- pr_info("TX4938 IDE interface (base %#lx, ctl %#lx, irq %d)\n",
- mapbase, mapctl, hw.irq);
- if (pdata->gbus_clock)
- tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
- else
- d.port_ops = NULL;
- ret = ide_host_add(&d, hws, 1, &host);
- if (!ret)
- platform_set_drvdata(pdev, host);
- return ret;
-}
-
-static int __exit tx4938ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-static struct platform_driver tx4938ide_driver = {
- .driver = {
- .name = "tx4938ide",
- },
- .remove = __exit_p(tx4938ide_remove),
-};
-
-module_platform_driver_probe(tx4938ide_driver, tx4938ide_probe);
-
-MODULE_DESCRIPTION("TX4938 internal IDE driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:tx4938ide");
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
deleted file mode 100644
index b1bbf807bb3d..000000000000
--- a/drivers/ide/tx4939ide.c
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- * TX4939 internal IDE driver
- * Based on RBTX49xx patch from CELF patch archive.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2007
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/scatterlist.h>
-
-#include <asm/ide.h>
-
-#define MODNAME "tx4939ide"
-
-/* ATA Shadow Registers (8-bit except for Data which is 16-bit) */
-#define TX4939IDE_Data 0x000
-#define TX4939IDE_Error_Feature 0x001
-#define TX4939IDE_Sec 0x002
-#define TX4939IDE_LBA0 0x003
-#define TX4939IDE_LBA1 0x004
-#define TX4939IDE_LBA2 0x005
-#define TX4939IDE_DevHead 0x006
-#define TX4939IDE_Stat_Cmd 0x007
-#define TX4939IDE_AltStat_DevCtl 0x402
-/* H/W DMA Registers */
-#define TX4939IDE_DMA_Cmd 0x800 /* 8-bit */
-#define TX4939IDE_DMA_Stat 0x802 /* 8-bit */
-#define TX4939IDE_PRD_Ptr 0x804 /* 32-bit */
-/* ATA100 CORE Registers (16-bit) */
-#define TX4939IDE_Sys_Ctl 0xc00
-#define TX4939IDE_Xfer_Cnt_1 0xc08
-#define TX4939IDE_Xfer_Cnt_2 0xc0a
-#define TX4939IDE_Sec_Cnt 0xc10
-#define TX4939IDE_Start_Lo_Addr 0xc18
-#define TX4939IDE_Start_Up_Addr 0xc20
-#define TX4939IDE_Add_Ctl 0xc28
-#define TX4939IDE_Lo_Burst_Cnt 0xc30
-#define TX4939IDE_Up_Burst_Cnt 0xc38
-#define TX4939IDE_PIO_Addr 0xc88
-#define TX4939IDE_H_Rst_Tim 0xc90
-#define TX4939IDE_Int_Ctl 0xc98
-#define TX4939IDE_Pkt_Cmd 0xcb8
-#define TX4939IDE_Bxfer_Cnt_Hi 0xcc0
-#define TX4939IDE_Bxfer_Cnt_Lo 0xcc8
-#define TX4939IDE_Dev_TErr 0xcd0
-#define TX4939IDE_Pkt_Xfer_Ctl 0xcd8
-#define TX4939IDE_Start_TAddr 0xce0
-
-/* bits for Int_Ctl */
-#define TX4939IDE_INT_ADDRERR 0x80
-#define TX4939IDE_INT_REACHMUL 0x40
-#define TX4939IDE_INT_DEVTIMING 0x20
-#define TX4939IDE_INT_UDMATERM 0x10
-#define TX4939IDE_INT_TIMER 0x08
-#define TX4939IDE_INT_BUSERR 0x04
-#define TX4939IDE_INT_XFEREND 0x02
-#define TX4939IDE_INT_HOST 0x01
-
-#define TX4939IDE_IGNORE_INTS \
- (TX4939IDE_INT_ADDRERR | TX4939IDE_INT_REACHMUL | \
- TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_UDMATERM | \
- TX4939IDE_INT_TIMER | TX4939IDE_INT_XFEREND)
-
-#ifdef __BIG_ENDIAN
-#define tx4939ide_swizzlel(a) ((a) ^ 4)
-#define tx4939ide_swizzlew(a) ((a) ^ 6)
-#define tx4939ide_swizzleb(a) ((a) ^ 7)
-#else
-#define tx4939ide_swizzlel(a) (a)
-#define tx4939ide_swizzlew(a) (a)
-#define tx4939ide_swizzleb(a) (a)
-#endif
-
-static u16 tx4939ide_readw(void __iomem *base, u32 reg)
-{
- return __raw_readw(base + tx4939ide_swizzlew(reg));
-}
-static u8 tx4939ide_readb(void __iomem *base, u32 reg)
-{
- return __raw_readb(base + tx4939ide_swizzleb(reg));
-}
-static void tx4939ide_writel(u32 val, void __iomem *base, u32 reg)
-{
- __raw_writel(val, base + tx4939ide_swizzlel(reg));
-}
-static void tx4939ide_writew(u16 val, void __iomem *base, u32 reg)
-{
- __raw_writew(val, base + tx4939ide_swizzlew(reg));
-}
-static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg)
-{
- __raw_writeb(val, base + tx4939ide_swizzleb(reg));
-}
-
-#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base)
-
-static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- int is_slave = drive->dn;
- u32 mask, val;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
- u8 safe = pio;
- ide_drive_t *pair;
-
- pair = ide_get_pair_dev(drive);
- if (pair)
- safe = min_t(u8, safe, pair->pio_mode - XFER_PIO_0);
- /*
- * Update Command Transfer Mode for master/slave and Data
- * Transfer Mode for this drive.
- */
- mask = is_slave ? 0x07f00000 : 0x000007f0;
- val = ((safe << 8) | (pio << 4)) << (is_slave ? 16 : 0);
- hwif->select_data = (hwif->select_data & ~mask) | val;
- /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
-}
-
-static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- u32 mask, val;
- const u8 mode = drive->dma_mode;
-
- /* Update Data Transfer Mode for this drive. */
- if (mode >= XFER_UDMA_0)
- val = mode - XFER_UDMA_0 + 8;
- else
- val = mode - XFER_MW_DMA_0 + 5;
- if (drive->dn) {
- mask = 0x00f00000;
- val <<= 20;
- } else {
- mask = 0x000000f0;
- val <<= 4;
- }
- hwif->select_data = (hwif->select_data & ~mask) | val;
- /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
-}
-
-static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl);
-
- if (ctl & TX4939IDE_INT_BUSERR) {
- /* reset FIFO */
- u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
-
- tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
- /* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
- ndelay(270);
- tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
- }
- if (ctl & (TX4939IDE_INT_ADDRERR |
- TX4939IDE_INT_DEVTIMING | TX4939IDE_INT_BUSERR))
- pr_err("%s: Error interrupt %#x (%s%s%s )\n",
- hwif->name, ctl,
- ctl & TX4939IDE_INT_ADDRERR ? " Address-Error" : "",
- ctl & TX4939IDE_INT_DEVTIMING ? " DEV-Timing" : "",
- ctl & TX4939IDE_INT_BUSERR ? " Bus-Error" : "");
- return ctl;
-}
-
-static void tx4939ide_clear_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif;
- void __iomem *base;
- u16 ctl;
-
- /*
- * tx4939ide_dma_test_irq() and tx4939ide_dma_end() do all job
- * for DMA case.
- */
- if (drive->waiting_for_dma)
- return;
- hwif = drive->hwif;
- base = TX4939IDE_BASE(hwif);
- ctl = tx4939ide_check_error_ints(hwif);
- tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl);
-}
-
-static u8 tx4939ide_cable_detect(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
-
- return tx4939ide_readw(base, TX4939IDE_Sys_Ctl) & 0x2000 ?
- ATA_CBL_PATA40 : ATA_CBL_PATA80;
-}
-
-#ifdef __BIG_ENDIAN
-static void tx4939ide_dma_host_set(ide_drive_t *drive, int on)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 unit = drive->dn;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u8 dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
-
- if (on)
- dma_stat |= (1 << (5 + unit));
- else
- dma_stat &= ~(1 << (5 + unit));
-
- tx4939ide_writeb(dma_stat, base, TX4939IDE_DMA_Stat);
-}
-#else
-#define tx4939ide_dma_host_set ide_dma_host_set
-#endif
-
-static u8 tx4939ide_clear_dma_status(void __iomem *base)
-{
- u8 dma_stat;
-
- /* read DMA status for INTR & ERROR flags */
- dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
- /* clear INTR & ERROR flags */
- tx4939ide_writeb(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR, base,
- TX4939IDE_DMA_Stat);
- /* recover intmask cleared by writing to bit2 of DMA_Stat */
- tx4939ide_writew(TX4939IDE_IGNORE_INTS << 8, base, TX4939IDE_Int_Ctl);
- return dma_stat;
-}
-
-#ifdef __BIG_ENDIAN
-/* custom ide_build_dmatable to handle swapped layout */
-static int tx4939ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- u32 *table = (u32 *)hwif->dmatable_cpu;
- unsigned int count = 0;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
- u32 cur_addr, cur_len, bcount;
-
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- /*
- * Fill in the DMA table, without crossing any 64kB boundaries.
- */
-
- while (cur_len) {
- if (count++ >= PRD_ENTRIES)
- goto use_pio_instead;
-
- bcount = 0x10000 - (cur_addr & 0xffff);
- if (bcount > cur_len)
- bcount = cur_len;
- /*
- * This workaround for zero count seems required.
- * (standard ide_build_dmatable does it too)
- */
- if (bcount == 0x10000)
- bcount = 0x8000;
- *table++ = bcount & 0xffff;
- *table++ = cur_addr;
- cur_addr += bcount;
- cur_len -= bcount;
- }
- }
-
- if (count) {
- *(table - 2) |= 0x80000000;
- return count;
- }
-
-use_pio_instead:
- printk(KERN_ERR "%s: %s\n", drive->name,
- count ? "DMA table too small" : "empty DMA table?");
-
- return 0; /* revert to PIO for this request */
-}
-#else
-#define tx4939ide_build_dmatable ide_build_dmatable
-#endif
-
-static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
-
- /* fall back to PIO! */
- if (tx4939ide_build_dmatable(drive, cmd) == 0)
- return 1;
-
- /* PRD table */
- tx4939ide_writel(hwif->dmatable_dma, base, TX4939IDE_PRD_Ptr);
-
- /* specify r/w */
- tx4939ide_writeb(rw, base, TX4939IDE_DMA_Cmd);
-
- /* clear INTR & ERROR flags */
- tx4939ide_clear_dma_status(base);
-
- tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
- TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
-
- tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
-
- return 0;
-}
-
-static int tx4939ide_dma_end(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 dma_stat, dma_cmd;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 ctl = tx4939ide_readw(base, TX4939IDE_Int_Ctl);
-
- /* get DMA command mode */
- dma_cmd = tx4939ide_readb(base, TX4939IDE_DMA_Cmd);
- /* stop DMA */
- tx4939ide_writeb(dma_cmd & ~ATA_DMA_START, base, TX4939IDE_DMA_Cmd);
-
- /* read and clear the INTR & ERROR bits */
- dma_stat = tx4939ide_clear_dma_status(base);
-
-#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR)
-
- /* verify good DMA status */
- if ((dma_stat & CHECK_DMA_MASK) == 0 &&
- (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) ==
- (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST))
- /* INT_IDE lost... bug? */
- return 0;
- return ((dma_stat & CHECK_DMA_MASK) !=
- ATA_DMA_INTR) ? 0x10 | dma_stat : 0;
-}
-
-/* returns 1 if DMA IRQ issued, 0 otherwise */
-static int tx4939ide_dma_test_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 ctl, ide_int;
- u8 dma_stat, stat;
- int found = 0;
-
- ctl = tx4939ide_check_error_ints(hwif);
- ide_int = ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST);
- switch (ide_int) {
- case TX4939IDE_INT_HOST:
- /* On error, XFEREND might not be asserted. */
- stat = tx4939ide_readb(base, TX4939IDE_AltStat_DevCtl);
- if ((stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) == ATA_ERR)
- found = 1;
- else
- /* Wait for XFEREND (Mask HOST and unmask XFEREND) */
- ctl &= ~TX4939IDE_INT_XFEREND << 8;
- ctl |= ide_int << 8;
- break;
- case TX4939IDE_INT_HOST | TX4939IDE_INT_XFEREND:
- dma_stat = tx4939ide_readb(base, TX4939IDE_DMA_Stat);
- if (!(dma_stat & ATA_DMA_INTR))
- pr_warn("%s: weird interrupt status. "
- "DMA_Stat %#02x int_ctl %#04x\n",
- hwif->name, dma_stat, ctl);
- found = 1;
- break;
- }
- /*
- * Do not clear XFEREND, HOST now. They will be cleared by
- * clearing bit2 of DMA_Stat.
- */
- ctl &= ~ide_int;
- tx4939ide_writew(ctl, base, TX4939IDE_Int_Ctl);
- return found;
-}
-
-#ifdef __BIG_ENDIAN
-static u8 tx4939ide_dma_sff_read_status(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
-
- return tx4939ide_readb(base, TX4939IDE_DMA_Stat);
-}
-#else
-#define tx4939ide_dma_sff_read_status ide_dma_sff_read_status
-#endif
-
-static void tx4939ide_init_hwif(ide_hwif_t *hwif)
-{
- void __iomem *base = TX4939IDE_BASE(hwif);
-
- /* Soft Reset */
- tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
- /* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
- ndelay(450);
- tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
- /* mask some interrupts and clear all interrupts */
- tx4939ide_writew((TX4939IDE_IGNORE_INTS << 8) | 0xff, base,
- TX4939IDE_Int_Ctl);
-
- tx4939ide_writew(0x0008, base, TX4939IDE_Lo_Burst_Cnt);
- tx4939ide_writew(0, base, TX4939IDE_Up_Burst_Cnt);
-}
-
-static int tx4939ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- hwif->dma_base =
- hwif->extra_base + tx4939ide_swizzleb(TX4939IDE_DMA_Cmd);
- /*
- * Note that we cannot use ATA_DMA_TABLE_OFS, ATA_DMA_STATUS
- * for big endian.
- */
- return ide_allocate_dma_engine(hwif);
-}
-
-static void tx4939ide_tf_load_fixup(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- void __iomem *base = TX4939IDE_BASE(hwif);
- u16 sysctl = hwif->select_data >> (drive->dn ? 16 : 0);
-
- /*
- * Fix ATA100 CORE System Control Register. (The write to the
- * Device/Head register may write wrong data to the System
- * Control Register)
- * While Sys_Ctl is written here, dev_select() is not needed.
- */
- tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
-}
-
-static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf,
- u8 valid)
-{
- ide_tf_load(drive, tf, valid);
-
- if (valid & IDE_VALID_DEVICE)
- tx4939ide_tf_load_fixup(drive);
-}
-
-#ifdef __BIG_ENDIAN
-
-/* custom iops (independent from SWAP_IO_SPACE) */
-static void tx4939ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--)
- *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static void tx4939ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- unsigned long port = drive->hwif->io_ports.data_addr;
- unsigned short *ptr = buf;
- unsigned int count = (len + 1) / 2;
-
- while (count--) {
- __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
- ptr++;
- }
- __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
-}
-
-static const struct ide_tp_ops tx4939ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = tx4939ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = tx4939ide_input_data_swap,
- .output_data = tx4939ide_output_data_swap,
-};
-
-#else /* __LITTLE_ENDIAN */
-
-static const struct ide_tp_ops tx4939ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = tx4939ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-#endif /* __LITTLE_ENDIAN */
-
-static const struct ide_port_ops tx4939ide_port_ops = {
- .set_pio_mode = tx4939ide_set_pio_mode,
- .set_dma_mode = tx4939ide_set_dma_mode,
- .clear_irq = tx4939ide_clear_irq,
- .cable_detect = tx4939ide_cable_detect,
-};
-
-static const struct ide_dma_ops tx4939ide_dma_ops = {
- .dma_host_set = tx4939ide_dma_host_set,
- .dma_setup = tx4939ide_dma_setup,
- .dma_start = ide_dma_start,
- .dma_end = tx4939ide_dma_end,
- .dma_test_irq = tx4939ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timer_expiry = ide_dma_sff_timer_expiry,
- .dma_sff_read_status = tx4939ide_dma_sff_read_status,
-};
-
-static const struct ide_port_info tx4939ide_port_info __initconst = {
- .init_hwif = tx4939ide_init_hwif,
- .init_dma = tx4939ide_init_dma,
- .port_ops = &tx4939ide_port_ops,
- .dma_ops = &tx4939ide_dma_ops,
- .tp_ops = &tx4939ide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA5,
- .chipset = ide_generic,
-};
-
-static int __init tx4939ide_probe(struct platform_device *pdev)
-{
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_host *host;
- struct resource *res;
- int irq, ret;
- unsigned long mapbase;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), MODNAME))
- return -EBUSY;
- mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!mapbase)
- return -EBUSY;
- memset(&hw, 0, sizeof(hw));
- hw.io_ports.data_addr =
- mapbase + tx4939ide_swizzlew(TX4939IDE_Data);
- hw.io_ports.error_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_Error_Feature);
- hw.io_ports.nsect_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_Sec);
- hw.io_ports.lbal_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_LBA0);
- hw.io_ports.lbam_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_LBA1);
- hw.io_ports.lbah_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_LBA2);
- hw.io_ports.device_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_DevHead);
- hw.io_ports.command_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_Stat_Cmd);
- hw.io_ports.ctl_addr =
- mapbase + tx4939ide_swizzleb(TX4939IDE_AltStat_DevCtl);
- hw.irq = irq;
- hw.dev = &pdev->dev;
-
- pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
- host = ide_host_alloc(&tx4939ide_port_info, hws, 1);
- if (!host)
- return -ENOMEM;
- /* use extra_base for base address of the all registers */
- host->ports[0]->extra_base = mapbase;
- ret = ide_host_register(host, &tx4939ide_port_info, hws);
- if (ret) {
- ide_host_free(host);
- return ret;
- }
- platform_set_drvdata(pdev, host);
- return 0;
-}
-
-static int __exit tx4939ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int tx4939ide_resume(struct platform_device *dev)
-{
- struct ide_host *host = platform_get_drvdata(dev);
- ide_hwif_t *hwif = host->ports[0];
-
- tx4939ide_init_hwif(hwif);
- return 0;
-}
-#else
-#define tx4939ide_resume NULL
-#endif
-
-static struct platform_driver tx4939ide_driver = {
- .driver = {
- .name = MODNAME,
- },
- .remove = __exit_p(tx4939ide_remove),
- .resume = tx4939ide_resume,
-};
-
-module_platform_driver_probe(tx4939ide_driver, tx4939ide_probe);
-
-MODULE_DESCRIPTION("TX4939 internal IDE driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:tx4939ide");
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
deleted file mode 100644
index cf996f788292..000000000000
--- a/drivers/ide/umc8672.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1995-1996 Linus Torvalds & author (see below)
- */
-
-/*
- * Principal Author/Maintainer: PODIEN@hml2.atlas.de (Wolfram Podien)
- *
- * This file provides support for the advanced features
- * of the UMC 8672 IDE interface.
- *
- * Version 0.01 Initial version, hacked out of ide.c,
- * and #include'd rather than compiled separately.
- * This will get cleaned up in a subsequent release.
- *
- * Version 0.02 now configs/compiles separate from ide.c -ml
- * Version 0.03 enhanced auto-tune, fix display bug
- * Version 0.05 replace sti() with restore_flags() -ml
- * add detection of possible race condition -ml
- */
-
-/*
- * VLB Controller Support from
- * Wolfram Podien
- * Rohoefe 3
- * D28832 Achim
- * Germany
- *
- * To enable UMC8672 support there must a lilo line like
- * append="ide0=umc8672"...
- * To set the speed according to the abilities of the hardware there must be a
- * line like
- * #define UMC_DRIVE0 11
- * in the beginning of the driver, which sets the speed of drive 0 to 11 (there
- * are some lines present). 0 - 11 are allowed speed values. These values are
- * the results from the DOS speed test program supplied from UMC. 11 is the
- * highest speed (about PIO mode 3)
- */
-#define REALLY_SLOW_IO /* some systems can safely undef this */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#define DRV_NAME "umc8672"
-
-/*
- * Default speeds. These can be changed with "auto-tune" and/or hdparm.
- */
-#define UMC_DRIVE0 1 /* DOS measured drive speeds */
-#define UMC_DRIVE1 1 /* 0 to 11 allowed */
-#define UMC_DRIVE2 1 /* 11 = Fastest Speed */
-#define UMC_DRIVE3 1 /* In case of crash reduce speed */
-
-static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3};
-static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11}; /* rough guesses */
-
-/* 0 1 2 3 4 5 6 7 8 9 10 11 */
-static const u8 speedtab [3][12] = {
- {0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
- {0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
- {0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0}
-};
-
-static void out_umc(char port, char wert)
-{
- outb_p(port, 0x108);
- outb_p(wert, 0x109);
-}
-
-static inline u8 in_umc(char port)
-{
- outb_p(port, 0x108);
- return inb_p(0x109);
-}
-
-static void umc_set_speeds(u8 speeds[])
-{
- int i, tmp;
-
- outb_p(0x5A, 0x108); /* enable umc */
-
- out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
- out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));
- tmp = 0;
- for (i = 3; i >= 0; i--)
- tmp = (tmp << 2) | speedtab[1][speeds[i]];
- out_umc(0xdc, tmp);
- for (i = 0; i < 4; i++) {
- out_umc(0xd0 + i, speedtab[2][speeds[i]]);
- out_umc(0xd8 + i, speedtab[2][speeds[i]]);
- }
- outb_p(0xa5, 0x108); /* disable umc */
-
- printk("umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
- speeds[0], speeds[1], speeds[2], speeds[3]);
-}
-
-static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_hwif_t *mate = hwif->mate;
- unsigned long flags;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
- drive->name, pio, pio_to_umc[pio]);
- if (mate)
- spin_lock_irqsave(&mate->lock, flags);
- if (mate && mate->handler) {
- printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
- } else {
- current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
- umc_set_speeds(current_speeds);
- }
- if (mate)
- spin_unlock_irqrestore(&mate->lock, flags);
-}
-
-static const struct ide_port_ops umc8672_port_ops = {
- .set_pio_mode = umc_set_pio_mode,
-};
-
-static const struct ide_port_info umc8672_port_info __initconst = {
- .name = DRV_NAME,
- .chipset = ide_umc8672,
- .port_ops = &umc8672_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA,
- .pio_mask = ATA_PIO4,
-};
-
-static int __init umc8672_probe(void)
-{
- unsigned long flags;
-
- if (!request_region(0x108, 2, "umc8672")) {
- printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
- return 1;
- }
- local_irq_save(flags);
- outb_p(0x5A, 0x108); /* enable umc */
- if (in_umc (0xd5) != 0xa0) {
- local_irq_restore(flags);
- printk(KERN_ERR "umc8672: not found\n");
- release_region(0x108, 2);
- return 1;
- }
- outb_p(0xa5, 0x108); /* disable umc */
-
- umc_set_speeds(current_speeds);
- local_irq_restore(flags);
-
- return ide_legacy_device_add(&umc8672_port_info, 0);
-}
-
-static bool probe_umc8672;
-
-module_param_named(probe, probe_umc8672, bool, 0);
-MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
-
-static int __init umc8672_init(void)
-{
- if (probe_umc8672 == 0)
- goto out;
-
- if (umc8672_probe() == 0)
- return 0;
-out:
- return -ENODEV;
-}
-
-module_init(umc8672_init);
-
-MODULE_AUTHOR("Wolfram Podien");
-MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset");
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
deleted file mode 100644
index 63a3aca506fc..000000000000
--- a/drivers/ide/via82cxxx.c
+++ /dev/null
@@ -1,532 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VIA IDE driver for Linux. Supported southbridges:
- *
- * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
- * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
- * vt8235, vt8237, vt8237a
- *
- * Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
- *
- * Based on the work of:
- * Michel Aubry
- * Jeff Garzik
- * Andre Hedrick
- *
- * Documentation:
- * Obsolete device documentation publicly available from via.com.tw
- * Current device documentation available under NDA only
- */
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ide.h>
-#include <linux/dmi.h>
-
-#ifdef CONFIG_PPC_CHRP
-#include <asm/processor.h>
-#endif
-
-#define DRV_NAME "via82cxxx"
-
-#define VIA_IDE_ENABLE 0x40
-#define VIA_IDE_CONFIG 0x41
-#define VIA_FIFO_CONFIG 0x43
-#define VIA_MISC_1 0x44
-#define VIA_MISC_2 0x45
-#define VIA_MISC_3 0x46
-#define VIA_DRIVE_TIMING 0x48
-#define VIA_8BIT_TIMING 0x4e
-#define VIA_ADDRESS_SETUP 0x4c
-#define VIA_UDMA_TIMING 0x50
-
-#define VIA_BAD_PREQ 0x01 /* Crashes if PREQ# till DDACK# set */
-#define VIA_BAD_CLK66 0x02 /* 66 MHz clock doesn't work correctly */
-#define VIA_SET_FIFO 0x04 /* Needs to have FIFO split set */
-#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */
-#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */
-#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */
-#define VIA_SATA_PATA 0x80 /* SATA/PATA combined configuration */
-
-enum {
- VIA_IDFLAG_SINGLE = (1 << 1), /* single channel controller */
-};
-
-/*
- * VIA SouthBridge chips.
- */
-
-static struct via_isa_bridge {
- char *name;
- u16 id;
- u8 rev_min;
- u8 rev_max;
- u8 udma_mask;
- u8 flags;
-} via_isa_bridges[] = {
- { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
- { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
- { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
- { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
- { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, },
- { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, },
- { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, },
- { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, },
- { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, },
- { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
- { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, },
- { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
- { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
- { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
- { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
- { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
- { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
- { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
- { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
- { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { NULL }
-};
-
-static unsigned int via_clock;
-static char *via_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
-
-struct via82cxxx_dev
-{
- struct via_isa_bridge *via_config;
- unsigned int via_80w;
-};
-
-/**
- * via_set_speed - write timing registers
- * @dev: PCI device
- * @dn: device
- * @timing: IDE timing data to use
- *
- * via_set_speed writes timing values to the chipset registers
- */
-
-static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(dev);
- struct via82cxxx_dev *vdev = host->host_priv;
- u8 t;
-
- if (~vdev->via_config->flags & VIA_BAD_AST) {
- pci_read_config_byte(dev, VIA_ADDRESS_SETUP, &t);
- t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(timing->setup, 1, 4) - 1) << ((3 - dn) << 1));
- pci_write_config_byte(dev, VIA_ADDRESS_SETUP, t);
- }
-
- pci_write_config_byte(dev, VIA_8BIT_TIMING + (1 - (dn >> 1)),
- ((clamp_val(timing->act8b, 1, 16) - 1) << 4) | (clamp_val(timing->rec8b, 1, 16) - 1));
-
- pci_write_config_byte(dev, VIA_DRIVE_TIMING + (3 - dn),
- ((clamp_val(timing->active, 1, 16) - 1) << 4) | (clamp_val(timing->recover, 1, 16) - 1));
-
- switch (vdev->via_config->udma_mask) {
- case ATA_UDMA2: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 5) - 2)) : 0x03; break;
- case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break;
- case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
- case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
- }
-
- /* Set UDMA unless device is not UDMA capable */
- if (vdev->via_config->udma_mask) {
- u8 udma_etc;
-
- pci_read_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, &udma_etc);
-
- /* clear transfer mode bit */
- udma_etc &= ~0x20;
-
- if (timing->udma) {
- /* preserve 80-wire cable detection bit */
- udma_etc &= 0x10;
- udma_etc |= t;
- }
-
- pci_write_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, udma_etc);
- }
-}
-
-/**
- * via_set_drive - configure transfer mode
- * @hwif: port
- * @drive: Drive to set up
- *
- * via_set_drive() computes timing values configures the chipset to
- * a desired transfer mode. It also can be called by upper layers.
- */
-
-static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- ide_drive_t *peer = ide_get_pair_dev(drive);
- struct ide_host *host = dev_get_drvdata(hwif->dev);
- struct via82cxxx_dev *vdev = host->host_priv;
- struct ide_timing t, p;
- unsigned int T, UT;
- const u8 speed = drive->dma_mode;
-
- T = 1000000000 / via_clock;
-
- switch (vdev->via_config->udma_mask) {
- case ATA_UDMA2: UT = T; break;
- case ATA_UDMA4: UT = T/2; break;
- case ATA_UDMA5: UT = T/3; break;
- case ATA_UDMA6: UT = T/4; break;
- default: UT = T;
- }
-
- ide_timing_compute(drive, speed, &t, T, UT);
-
- if (peer) {
- ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
- ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
- }
-
- via_set_speed(hwif, drive->dn, &t);
-}
-
-/**
- * via_set_pio_mode - set host controller for PIO mode
- * @hwif: port
- * @drive: drive
- *
- * A callback from the upper layers for PIO-only tuning.
- */
-
-static void via_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- drive->dma_mode = drive->pio_mode;
- via_set_drive(hwif, drive);
-}
-
-static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
-{
- struct via_isa_bridge *via_config;
-
- for (via_config = via_isa_bridges;
- via_config->id != PCI_DEVICE_ID_VIA_ANON; via_config++)
- if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA +
- !!(via_config->flags & VIA_BAD_ID),
- via_config->id, NULL))) {
-
- if ((*isa)->revision >= via_config->rev_min &&
- (*isa)->revision <= via_config->rev_max)
- break;
- pci_dev_put(*isa);
- }
-
- return via_config;
-}
-
-/*
- * Check and handle 80-wire cable presence
- */
-static void via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
-{
- int i;
-
- switch (vdev->via_config->udma_mask) {
- case ATA_UDMA4:
- for (i = 24; i >= 0; i -= 8)
- if (((u >> (i & 16)) & 8) &&
- ((u >> i) & 0x20) &&
- (((u >> i) & 7) < 2)) {
- /*
- * 2x PCI clock and
- * UDMA w/ < 3T/cycle
- */
- vdev->via_80w |= (1 << (1 - (i >> 4)));
- }
- break;
-
- case ATA_UDMA5:
- for (i = 24; i >= 0; i -= 8)
- if (((u >> i) & 0x10) ||
- (((u >> i) & 0x20) &&
- (((u >> i) & 7) < 4))) {
- /* BIOS 80-wire bit or
- * UDMA w/ < 60ns/cycle
- */
- vdev->via_80w |= (1 << (1 - (i >> 4)));
- }
- break;
-
- case ATA_UDMA6:
- for (i = 24; i >= 0; i -= 8)
- if (((u >> i) & 0x10) ||
- (((u >> i) & 0x20) &&
- (((u >> i) & 7) < 6))) {
- /* BIOS 80-wire bit or
- * UDMA w/ < 60ns/cycle
- */
- vdev->via_80w |= (1 << (1 - (i >> 4)));
- }
- break;
- }
-}
-
-/**
- * init_chipset_via82cxxx - initialization handler
- * @dev: PCI device
- *
- * The initialization callback. Here we determine the IDE chip type
- * and initialize its drive independent registers.
- */
-
-static int init_chipset_via82cxxx(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct via82cxxx_dev *vdev = host->host_priv;
- struct via_isa_bridge *via_config = vdev->via_config;
- u8 t, v;
- u32 u;
-
- /*
- * Detect cable and configure Clk66
- */
- pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
-
- via_cable_detect(vdev, u);
-
- if (via_config->udma_mask == ATA_UDMA4) {
- /* Enable Clk66 */
- pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008);
- } else if (via_config->flags & VIA_BAD_CLK66) {
- /* Would cause trouble on 596a and 686 */
- pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008);
- }
-
- /*
- * Check whether interfaces are enabled.
- */
-
- pci_read_config_byte(dev, VIA_IDE_ENABLE, &v);
-
- /*
- * Set up FIFO sizes and thresholds.
- */
-
- pci_read_config_byte(dev, VIA_FIFO_CONFIG, &t);
-
- /* Disable PREQ# till DDACK# */
- if (via_config->flags & VIA_BAD_PREQ) {
- /* Would crash on 586b rev 41 */
- t &= 0x7f;
- }
-
- /* Fix FIFO split between channels */
- if (via_config->flags & VIA_SET_FIFO) {
- t &= (t & 0x9f);
- switch (v & 3) {
- case 2: t |= 0x00; break; /* 16 on primary */
- case 1: t |= 0x60; break; /* 16 on secondary */
- case 3: t |= 0x20; break; /* 8 pri 8 sec */
- }
- }
-
- pci_write_config_byte(dev, VIA_FIFO_CONFIG, t);
-
- return 0;
-}
-
-/*
- * Cable special cases
- */
-
-static const struct dmi_system_id cable_dmi_table[] = {
- {
- .ident = "Acer Ferrari 3400",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
- },
- },
- { }
-};
-
-static int via_cable_override(struct pci_dev *pdev)
-{
- /* Systems by DMI */
- if (dmi_check_system(cable_dmi_table))
- return 1;
-
- /* Arima W730-K8/Targa Visionary 811/... */
- if (pdev->subsystem_vendor == 0x161F &&
- pdev->subsystem_device == 0x2032)
- return 1;
-
- return 0;
-}
-
-static u8 via82cxxx_cable_detect(ide_hwif_t *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- struct ide_host *host = pci_get_drvdata(pdev);
- struct via82cxxx_dev *vdev = host->host_priv;
-
- if (via_cable_override(pdev))
- return ATA_CBL_PATA40_SHORT;
-
- if ((vdev->via_config->flags & VIA_SATA_PATA) && hwif->channel == 0)
- return ATA_CBL_SATA;
-
- if ((vdev->via_80w >> hwif->channel) & 1)
- return ATA_CBL_PATA80;
- else
- return ATA_CBL_PATA40;
-}
-
-static const struct ide_port_ops via_port_ops = {
- .set_pio_mode = via_set_pio_mode,
- .set_dma_mode = via_set_drive,
- .cable_detect = via82cxxx_cable_detect,
-};
-
-static const struct ide_port_info via82cxxx_chipset = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_via82cxxx,
- .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
- .port_ops = &via_port_ops,
- .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_IO_32BIT,
- .pio_mask = ATA_PIO5,
- .swdma_mask = ATA_SWDMA2,
- .mwdma_mask = ATA_MWDMA2,
-};
-
-static int via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct pci_dev *isa = NULL;
- struct via_isa_bridge *via_config;
- struct via82cxxx_dev *vdev;
- int rc;
- u8 idx = id->driver_data;
- struct ide_port_info d;
-
- d = via82cxxx_chipset;
-
- /*
- * Find the ISA bridge and check we know what it is.
- */
- via_config = via_config_find(&isa);
-
- /*
- * Print the boot message.
- */
- printk(KERN_INFO DRV_NAME " %s: VIA %s (rev %02x) IDE %sDMA%s\n",
- pci_name(dev), via_config->name, isa->revision,
- via_config->udma_mask ? "U" : "MW",
- via_dma[via_config->udma_mask ?
- (fls(via_config->udma_mask) - 1) : 0]);
-
- pci_dev_put(isa);
-
- /*
- * Determine system bus clock.
- */
- via_clock = (ide_pci_clk ? ide_pci_clk : 33) * 1000;
-
- switch (via_clock) {
- case 33000: via_clock = 33333; break;
- case 37000: via_clock = 37500; break;
- case 41000: via_clock = 41666; break;
- }
-
- if (via_clock < 20000 || via_clock > 50000) {
- printk(KERN_WARNING DRV_NAME ": User given PCI clock speed "
- "impossible (%d), using 33 MHz instead.\n", via_clock);
- via_clock = 33333;
- }
-
- if (idx == 1)
- d.enablebits[1].reg = d.enablebits[0].reg = 0;
- else
- d.host_flags |= IDE_HFLAG_NO_AUTODMA;
-
- if (idx == VIA_IDFLAG_SINGLE)
- d.host_flags |= IDE_HFLAG_SINGLE;
-
- if ((via_config->flags & VIA_NO_UNMASK) == 0)
- d.host_flags |= IDE_HFLAG_UNMASK_IRQS;
-
- d.udma_mask = via_config->udma_mask;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev) {
- printk(KERN_ERR DRV_NAME " %s: out of memory :(\n",
- pci_name(dev));
- return -ENOMEM;
- }
-
- vdev->via_config = via_config;
-
- rc = ide_pci_init_one(dev, &d, vdev);
- if (rc)
- kfree(vdev);
-
- return rc;
-}
-
-static void via_remove(struct pci_dev *dev)
-{
- struct ide_host *host = pci_get_drvdata(dev);
- struct via82cxxx_dev *vdev = host->host_priv;
-
- ide_pci_remove(dev);
- kfree(vdev);
-}
-
-static const struct pci_device_id via_pci_tbl[] = {
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), VIA_IDFLAG_SINGLE },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6415), 1 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
- { 0, },
-};
-MODULE_DEVICE_TABLE(pci, via_pci_tbl);
-
-static struct pci_driver via_pci_driver = {
- .name = "VIA_IDE",
- .id_table = via_pci_tbl,
- .probe = via_init_one,
- .remove = via_remove,
- .suspend = ide_pci_suspend,
- .resume = ide_pci_resume,
-};
-
-static int __init via_ide_init(void)
-{
- return ide_pci_register_driver(&via_pci_driver);
-}
-
-static void __exit via_ide_exit(void)
-{
- pci_unregister_driver(&via_pci_driver);
-}
-
-module_init(via_ide_init);
-module_exit(via_ide_exit);
-
-MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz, Michel Aubry, Jeff Garzik, Andre Hedrick");
-MODULE_DESCRIPTION("PCI driver module for VIA IDE");
-MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index cceda3cecbcf..8b1723635cce 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -229,7 +229,6 @@ config DMARD10
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 9d3952b4674f..a27db78ea13e 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -771,6 +771,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
if (ret)
goto err;
+ if (channel >= indio_dev->num_channels) {
+ dev_err(indio_dev->dev.parent,
+ "Channel index >= number of channels\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
ret = of_property_read_u32_array(child, "diff-channels",
ain, 2);
if (ret)
@@ -850,6 +857,11 @@ static int ad7124_setup(struct ad7124_state *st)
return ret;
}
+static void ad7124_reg_disable(void *r)
+{
+ regulator_disable(r);
+}
+
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -895,17 +907,20 @@ static int ad7124_probe(struct spi_device *spi)
ret = regulator_enable(st->vref[i]);
if (ret)
return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
+ st->vref[i]);
+ if (ret)
+ return ret;
}
st->mclk = devm_clk_get(&spi->dev, "mclk");
- if (IS_ERR(st->mclk)) {
- ret = PTR_ERR(st->mclk);
- goto error_regulator_disable;
- }
+ if (IS_ERR(st->mclk))
+ return PTR_ERR(st->mclk);
ret = clk_prepare_enable(st->mclk);
if (ret < 0)
- goto error_regulator_disable;
+ return ret;
ret = ad7124_soft_reset(st);
if (ret < 0)
@@ -935,11 +950,6 @@ error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_clk_disable_unprepare:
clk_disable_unprepare(st->mclk);
-error_regulator_disable:
- for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
- if (!IS_ERR_OR_NULL(st->vref[i]))
- regulator_disable(st->vref[i]);
- }
return ret;
}
@@ -948,17 +958,11 @@ static int ad7124_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7124_state *st = iio_priv(indio_dev);
- int i;
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
clk_disable_unprepare(st->mclk);
- for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
- if (!IS_ERR_OR_NULL(st->vref[i]))
- regulator_disable(st->vref[i]);
- }
-
return 0;
}
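
The regulator teardown above moves to a device-managed action so the later error paths no longer need manual disables. A minimal sketch of that pattern, with hypothetical names and not tied to this driver:

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static void example_reg_disable(void *data)
	{
		regulator_disable(data);	/* runs automatically on probe failure or remove */
	}

	static int example_enable_vref(struct device *dev, struct regulator *vref)
	{
		int ret = regulator_enable(vref);

		if (ret)
			return ret;
		/* If registering the action fails, the callback runs immediately. */
		return devm_add_action_or_reset(dev, example_reg_disable, vref);
	}
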
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 2ed580521d81..1141cc13a124 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi)
{
struct ad7192_state *st;
struct iio_dev *indio_dev;
- int ret, voltage_uv = 0;
+ int ret;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ?\n");
@@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi)
goto error_disable_avdd;
}
- voltage_uv = regulator_get_voltage(st->avdd);
-
- if (voltage_uv > 0) {
- st->int_vref_mv = voltage_uv / 1000;
- } else {
- ret = voltage_uv;
+ ret = regulator_get_voltage(st->avdd);
+ if (ret < 0) {
dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
goto error_disable_avdd;
}
+ st->int_vref_mv = ret / 1000;
spi_set_drvdata(spi, indio_dev);
st->chip_info = of_device_get_match_data(&spi->dev);
@@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi)
return 0;
error_disable_clk:
- clk_disable_unprepare(st->mclk);
+ if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+ st->clock_sel == AD7192_CLK_EXT_MCLK2)
+ clk_disable_unprepare(st->mclk);
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_dvdd:
@@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi)
struct ad7192_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- clk_disable_unprepare(st->mclk);
+ if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+ st->clock_sel == AD7192_CLK_EXT_MCLK2)
+ clk_disable_unprepare(st->mclk);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
regulator_disable(st->dvdd);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index c945f1349623..60f21fed6dcb 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -167,6 +167,10 @@ struct ad7768_state {
* transfer buffers to live in their own cache lines.
*/
union {
+ struct {
+ __be32 chan;
+ s64 timestamp;
+ } scan;
__be32 d32;
u8 d8[2];
} data ____cacheline_aligned;
@@ -469,11 +473,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
mutex_lock(&st->lock);
- ret = spi_read(st->spi, &st->data.d32, 3);
+ ret = spi_read(st->spi, &st->data.scan.chan, 3);
if (ret < 0)
goto err_unlock;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 5e980a06258e..440ef4c7be07 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
id &= AD7793_ID_MASK;
if (id != st->chip_info->id) {
+ ret = -ENODEV;
dev_err(&st->sd.spi->dev, "device ID query failed\n");
goto out;
}
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 9a649745cd0a..069b561ee768 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -59,8 +59,10 @@ struct ad7923_state {
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * Ensure rx_buf can be directly used in iio_push_to_buffers_with_timestamp
+ * Length = 8 channels + 4 extra for 8 byte timestamp
*/
- __be16 rx_buf[4] ____cacheline_aligned;
+ __be16 rx_buf[12] ____cacheline_aligned;
__be16 tx_buf[4];
};
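
The rx_buf enlargement above leaves room for the 8-byte timestamp that iio_push_to_buffers_with_timestamp() appends after the channel data. A common way to express that layout, shown only as an illustrative sketch with made-up names:

	#include <linux/types.h>

	struct example_scan {
		__be16 channels[8];		/* up to eight 16-bit samples */
		s64 timestamp __aligned(8);	/* slot filled in by the IIO core */
	};
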
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 24d492567336..2a3dd3b907be 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
tristate "Common module (trigger) for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
select IIO_TRIGGER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build trigger support for HID sensors.
Triggers will be sent if all requested attributes were read.
diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
index 7ab2ccf90863..8107f7bbbe3c 100644
--- a/drivers/iio/dac/ad5770r.c
+++ b/drivers/iio/dac/ad5770r.c
@@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
device_for_each_child_node(&st->spi->dev, child) {
ret = fwnode_property_read_u32(child, "num", &num);
if (ret)
- return ret;
- if (num >= AD5770R_MAX_CHANNELS)
- return -EINVAL;
+ goto err_child_out;
+ if (num >= AD5770R_MAX_CHANNELS) {
+ ret = -EINVAL;
+ goto err_child_out;
+ }
ret = fwnode_property_read_u32_array(child,
"adi,range-microamp",
tmp, 2);
if (ret)
- return ret;
+ goto err_child_out;
min = tmp[0] / 1000;
max = tmp[1] / 1000;
ret = ad5770r_store_output_range(st, min, max, num);
if (ret)
- return ret;
+ goto err_child_out;
}
+ return 0;
+
+err_child_out:
+ fwnode_handle_put(child);
return ret;
}
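
The goto added above follows the reference-counting rule for fwnode child iterators: leaving device_for_each_child_node() early keeps a reference on the current child, which the error path must drop. A minimal, hypothetical sketch of that rule:

	#include <linux/property.h>

	static int example_parse_children(struct device *dev)
	{
		struct fwnode_handle *child;
		u32 num;
		int ret;

		device_for_each_child_node(dev, child) {
			ret = fwnode_property_read_u32(child, "num", &num);
			if (ret) {
				fwnode_handle_put(child);	/* drop the iterator's reference */
				return ret;
			}
		}
		return 0;
	}
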
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 5824f2edf975..20b5ac7ab66a 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -111,7 +111,6 @@ config FXAS21002C_SPI
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Gyroscope 3D"
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 1a20c6b88e7d..645461c70454 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
if (ret < 0) {
dev_err(dev, "failed to read temp: %d\n", ret);
+ fxas21002c_pm_put(data);
goto data_unlock;
}
@@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data,
&axis_be, sizeof(axis_be));
if (ret < 0) {
dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
+ fxas21002c_pm_put(data);
goto data_unlock;
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index ac90be03332a..f17a93519535 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
- /* The temperature scaling is (x+23000)/280 Celsius */
+ /*
+ * The temperature scaling is (x+23000)/280 Celsius
+ * for the "best fit straight line" temperature range
+ * of -30C..85C. The 23000 includes room temperature
+ * offset of +35C, 280 is the precision scale and x is
+ * the 16-bit signed integer reported by hardware.
+ *
+ * Temperature value itself represents temperature of
+ * the sensor die.
+ */
*val = 23000;
return IIO_VAL_INT;
default:
@@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
goto out_read_raw_unlock;
}
- *val = be16_to_cpu(raw_val);
+ *val = (s16)be16_to_cpu(raw_val);
ret = IIO_VAL_INT;
goto out_read_raw_unlock;
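
The (s16) cast above matters because be16_to_cpu() yields an unsigned value; a negative big-endian sample must be reinterpreted as s16 before being widened to the int reported to user space. Purely illustrative values:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void example_sign_extend(void)
	{
		__be16 raw = cpu_to_be16((u16)-10);	/* a negative 16-bit sample */
		int wrong = be16_to_cpu(raw);		/* 65526: sign lost */
		int right = (s16)be16_to_cpu(raw);	/* -10: sign preserved */
	}
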
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 6549fcf6db69..2de5494e7c22 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
help
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d92c58a94fe4..59efb36db2c7 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1778,7 +1778,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!indio_dev->info)
goto out_unlock;
- ret = -EINVAL;
list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
ret = h->ioctl(indio_dev, filp, cmd, arg);
if (ret != IIO_IOCTL_UNHANDLED)
@@ -1786,7 +1785,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (ret == IIO_IOCTL_UNHANDLED)
- ret = -EINVAL;
+ ret = -ENODEV;
out_unlock:
mutex_unlock(&indio_dev->info_exist_lock);
@@ -1926,9 +1925,6 @@ EXPORT_SYMBOL(__iio_device_register);
**/
void iio_device_unregister(struct iio_dev *indio_dev)
{
- struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- struct iio_ioctl_handler *h, *t;
-
cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
mutex_lock(&indio_dev->info_exist_lock);
@@ -1939,9 +1935,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
indio_dev->info = NULL;
- list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
- list_del(&h->entry);
-
iio_device_wakeup_eventset(indio_dev);
iio_buffer_wakeup_poll(indio_dev);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 33ad4dd0b5c7..917f9becf9c7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -256,7 +256,6 @@ config ISL29125
config HID_SENSOR_ALS
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID ALS"
@@ -270,7 +269,6 @@ config HID_SENSOR_ALS
config HID_SENSOR_PROX
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID PROX"
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index d048ae257c51..f960be7d4001 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -582,7 +582,7 @@ static int gp2ap002_probe(struct i2c_client *client,
"gp2ap002", indio_dev);
if (ret) {
dev_err(dev, "unable to request IRQ\n");
- goto out_disable_vio;
+ goto out_put_pm;
}
gp2ap002->irq = client->irq;
@@ -612,8 +612,9 @@ static int gp2ap002_probe(struct i2c_client *client,
return 0;
-out_disable_pm:
+out_put_pm:
pm_runtime_put_noidle(dev);
+out_disable_pm:
pm_runtime_disable(dev);
out_disable_vio:
regulator_disable(gp2ap002->vio);
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 0f787bfc88fc..c9d8f07a6fcd 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
return lux_val;
}
+ /* Avoid division by zero of lux_value later on */
+ if (lux_val == 0) {
+ dev_err(&chip->client->dev,
+ "%s: lux_val of 0 will produce out of range trim_value\n",
+ __func__);
+ return -ENODATA;
+ }
+
gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
* chip->als_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 5d4ffd66032e..74ad5701c6c2 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -95,7 +95,6 @@ config MAG3110
config HID_SENSOR_MAGNETOMETER_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Magenetometer 3D"
diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
index a505583cc2fd..396cbbb867f4 100644
--- a/drivers/iio/orientation/Kconfig
+++ b/drivers/iio/orientation/Kconfig
@@ -9,7 +9,6 @@ menu "Inclinometer sensors"
config HID_SENSOR_INCLINOMETER_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Inclinometer 3D"
@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
config HID_SENSOR_DEVICE_ROTATION
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Device Rotation"
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 689b978db4f9..fc0d3cfca418 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -79,7 +79,6 @@ config DPS310
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID PRESS"
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index c685f10b5ae4..cc206bfa09c7 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
if (ret < 0) {
dev_err(&client->dev, "cannot send start measurement command");
+ pm_runtime_put_noidle(&client->dev);
return ret;
}
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index f1f2a1499c9e..4df60082c1fa 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
tristate "HID Environmental temperature sensor"
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
help
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2b9ffc21cbc4..ab148a696c0c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -473,6 +473,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
list_del(&id_priv->list);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+ id_priv->id.device = NULL;
if (id_priv->id.route.addr.dev_addr.sgid_attr) {
rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1860,6 +1861,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
iw_destroy_cm_id(id_priv->cm_id.iw);
}
cma_leave_mc_groups(id_priv);
+ rdma_restrack_del(&id_priv->res);
cma_release_dev(id_priv);
}
@@ -1873,7 +1875,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
put_net(id_priv->id.route.addr.dev_addr.net);
- rdma_restrack_del(&id_priv->res);
kfree(id_priv);
}
@@ -3774,7 +3775,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
}
id_priv->backlog = backlog;
- if (id->device) {
+ if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d5e15a8c870d..64e4be1cbec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
goto err_free_attr;
}
+ if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
+ err = -EINVAL;
+ goto err_uobj;
+ }
+
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
err = -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 9ec6971056fa..049684880ae0 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
return ret;
uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
- if (!uapi_object)
- return -EINVAL;
+ if (IS_ERR(uapi_object))
+ return PTR_ERR(uapi_object);
handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
if (ret)
return ret;
+ if (!user_entry_size)
+ return -EINVAL;
+
max_entries = uverbs_attr_ptr_get_array_size(
attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
user_entry_size);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 22898d97ecbd..230a6ae0ab5a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
- if (!mlx4_is_slave(dev->dev))
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
resp.response_length += sizeof(resp.hca_core_clock_offset);
- if (!err && !mlx4_is_slave(dev->dev)) {
+ if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
}
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
- if (!rdma_is_port_valid(qp->device, flow_attr->port))
- return ERR_PTR(-EINVAL);
-
if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index eb92cefffd77..9ce01f729673 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
ib_umem_release(cq->buf.umem);
}
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
- struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < buf->nent; i++) {
- cqe = get_cqe(cq, i);
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto err_db;
- init_cq_frag_buf(cq, &cq->buf);
+ init_cq_frag_buf(&cq->buf);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (err)
goto ex;
- init_cq_frag_buf(cq, cq->resize_buf);
+ init_cq_frag_buf(cq->resize_buf);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index a0b677accd96..eb9b0a2707f8 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
case UVERBS_OBJECT_QP:
{
struct mlx5_ib_qp *qp = to_mqp(uobj->object);
- enum ib_qp_type qp_type = qp->ibqp.qp_type;
- if (qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
sq->tisn) == obj_id);
}
- if (qp_type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT)
return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
qp->dct.mdct.mqp.qpn) == obj_id;
-
return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
qp->ibqp.qp_num) == obj_id;
}
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index 094bf85589db..001d766cf291 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
if (err)
return err;
+ if (op >= BITS_PER_TYPE(u32))
+ return -EOPNOTSUPP;
+
if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 61475b571531..7af4df7a6823 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
struct ib_umem *umem;
unsigned long user_virt;
int refcnt;
+ struct mm_struct *mm;
};
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
mutex_lock(&context->db_page_mutex);
list_for_each_entry(page, &context->db_page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if ((current->mm == page->mm) &&
+ (page->user_virt == (virt & PAGE_MASK)))
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
kfree(page);
goto out;
}
+ mmgrab(current->mm);
+ page->mm = current->mm;
list_add(&page->list, &context->db_page_list);
@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
if (!--db->u.user_page->refcnt) {
list_del(&db->u.user_page->list);
+ mmdrop(db->u.user_page->mm);
ib_umem_release(db->u.user_page->umem);
kfree(db->u.user_page);
}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 2fc6a60c4e77..18ee2f293825 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto free_ucmd;
}
- if (flow_attr->port > dev->num_ports ||
- (flow_attr->flags &
- ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+ if (flow_attr->flags &
+ ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
err = -EINVAL;
goto free_ucmd;
}
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+ err = -EINVAL;
+ goto end;
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6d1dd09a4388..644d5d0ac544 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4419,6 +4419,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
if (bound) {
rdma_roce_rescan_device(&dev->ib_dev);
+ mpi->ibdev->ib_active = true;
break;
}
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4388afeff251..425423dfac72 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
mlx5_ib_can_load_pas_with_umr(dev, 0))
- ent->limit = dev->mdev->profile->mr_cache[i].limit;
+ ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
spin_lock_irq(&ent->lock);
@@ -1940,8 +1940,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
mlx5r_deref_wait_odp_mkey(&mr->mmkey);
if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
- xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
- NULL, GFP_KERNEL);
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, NULL, GFP_KERNEL);
if (mr->mtt_mr) {
rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 2af26737d32d..a6712e373eed 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -346,13 +346,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mr_obj, NULL);
- if (ret)
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
+ }
if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
return COMPST_COMP_ACK;
- else
- return COMPST_UPDATE_COMP;
+
+ return COMPST_UPDATE_COMP;
}
static inline enum comp_state do_atomic(struct rxe_qp *qp,
@@ -366,10 +368,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mr_obj, NULL);
- if (ret)
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
- else
- return COMPST_COMP_ACK;
+ }
+
+ return COMPST_COMP_ACK;
}
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 34ae957a315c..b0f350d674fd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
return err;
}
@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
return err;
}
}
@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
err1:
+ qp->pd = NULL;
+ qp->rcq = NULL;
+ qp->scq = NULL;
+ qp->srq = NULL;
+
if (srq)
rxe_drop_ref(srq);
rxe_drop_ref(scq);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index d2313efb26db..3f175f220a22 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
size_t length;
@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
- scq = to_siw_cq(attrs->send_cq);
- rcq = to_siw_cq(attrs->recv_cq);
- if (!scq || (!rcq && !attrs->srq)) {
+ if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
goto err_out;
@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
else {
/* Zero sized SQ is not supported */
rv = -EINVAL;
- goto err_out;
+ goto err_out_xa;
}
if (num_rqe)
num_rqe = roundup_pow_of_two(num_rqe);
@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
}
qp->pd = pd;
- qp->scq = scq;
- qp->rcq = rcq;
+ qp->scq = to_siw_cq(attrs->send_cq);
+ qp->rcq = to_siw_cq(attrs->recv_cq);
if (attrs->srq) {
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index d5a90a66b45c..5b05cf3837da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.kind = "ipoib",
+ .netns_refund = true,
.maxtype = IFLA_IPOIB_MAX,
.policy = ipoib_policy,
.priv_size = sizeof(struct ipoib_dev_priv),
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index d1591a28b743..8f385f9c2dd3 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <asm/div64.h>
@@ -205,6 +205,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
}
mutex_unlock(&bcm_voter_lock);
+ of_node_put(node);
return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
@@ -362,6 +363,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
{ .compatible = "qcom,bcm-voter" },
{ }
};
+MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
static struct platform_driver qcom_icc_bcm_voter_driver = {
.probe = qcom_icc_bcm_voter_probe,
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 80e8e1916dd1..3ac42bbdefc6 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -884,7 +884,7 @@ static inline u64 build_inv_address(u64 address, size_t size)
* The msb-bit must be clear on the address. Just set all the
* lower bits.
*/
- address |= 1ull << (msb_diff - 1);
+ address |= (1ull << msb_diff) - 1;
}
/* Clear bits 11:0 */
@@ -1714,6 +1714,8 @@ static void amd_iommu_probe_finalize(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+ else
+ set_dma_ops(dev, NULL);
}
static void amd_iommu_release_device(struct device *dev)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 1757ac1e1623..84057cb9596c 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1142,7 +1142,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
if (err)
- goto err_unmap;
+ goto err_sysfs;
}
drhd->iommu = iommu;
@@ -1150,6 +1150,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
+err_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
unmap_iommu(iommu);
error_free_seq_id:
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 708f430af1c4..be35284a2016 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2525,9 +2525,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
struct device *dev,
u32 pasid)
{
- int flags = PASID_FLAG_SUPERVISOR_MODE;
struct dma_pte *pgd = domain->pgd;
int agaw, level;
+ int flags = 0;
/*
* Skip top levels of page tables for iommu which has
@@ -2543,7 +2543,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
if (level != 4 && level != 5)
return -EINVAL;
- flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+ if (pasid != PASID_RID2PASID)
+ flags |= PASID_FLAG_SUPERVISOR_MODE;
+ if (level == 5)
+ flags |= PASID_FLAG_FL5LP;
if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
flags |= PASID_FLAG_PAGE_SNOOP;
@@ -4606,6 +4609,8 @@ static int auxiliary_link_device(struct dmar_domain *domain,
if (!sinfo) {
sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ if (!sinfo)
+ return -ENOMEM;
sinfo->domain = domain;
sinfo->pdev = dev;
list_add(&sinfo->link_phys, &info->subdevices);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 72646bafc52f..72dc84821dad 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -699,7 +699,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Since it is a second level only translation setup, we should
* set SRE bit as well (addresses are expected to be GPAs).
*/
- pasid_set_sre(pte);
+ if (pasid != PASID_RID2PASID)
+ pasid_set_sre(pte);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 7c02481a81b4..c6e5ee4d9cef 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1136,6 +1136,7 @@ static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_iommu_drv = {
.driver.name = KBUILD_MODNAME,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index b90e825df7e1..62543a4eccc0 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -596,7 +596,7 @@ config IRQ_IDT3243X
config APPLE_AIC
bool "Apple Interrupt Controller (AIC)"
depends on ARM64
- default ARCH_APPLE
+ depends on ARCH_APPLE || COMPILE_TEST
help
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 37a23aa6de37..66d623f91678 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
nmi_exit();
}
+static u32 do_read_iar(struct pt_regs *regs)
+{
+ u32 iar;
+
+ if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
+ u64 pmr;
+
+ /*
+ * We were in a context with IRQs disabled. However, the
+ * entry code has set PMR to a value that allows any
+ * interrupt to be acknowledged, and not just NMIs. This can
+ * lead to surprising effects if the NMI has been retired in
+ * the meantime, and that there is an IRQ pending. The IRQ
+ * would then be taken in NMI context, something that nobody
+ * wants to debug twice.
+ *
+ * Until we sort this, drop PMR again to a level that will
+ * actually only allow NMIs before reading IAR, and then
+ * restore it to what it was.
+ */
+ pmr = gic_read_pmr();
+ gic_pmr_mask_irqs();
+ isb();
+
+ iar = gic_read_iar();
+
+ gic_write_pmr(pmr);
+ } else {
+ iar = gic_read_iar();
+ }
+
+ return iar;
+}
+
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
- irqnr = gic_read_iar();
+ irqnr = do_read_iar(regs);
/* Check for special IDs first */
if ((irqnr >= 1020 && irqnr <= 1023))
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 91adf771f185..090bc3f4f7d8 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -359,10 +359,8 @@ static int mvebu_icu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
icu->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(icu->base)) {
- dev_err(&pdev->dev, "Failed to map icu base address.\n");
+ if (IS_ERR(icu->base))
return PTR_ERR(icu->base);
- }
/*
* Legacy bindings: ICU is one node with one MSI parent: force manually
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 18832ccc8ff8..3a7b7a7f20ca 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -384,10 +384,8 @@ static int mvebu_sei_probe(struct platform_device *pdev)
sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sei->base = devm_ioremap_resource(sei->dev, sei->res);
- if (IS_ERR(sei->base)) {
- dev_err(sei->dev, "Failed to remap SEI resource\n");
+ if (IS_ERR(sei->base))
return PTR_ERR(sei->base);
- }
/* Retrieve the SEI capabilities with the interrupt ranges */
sei->caps = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index b9db90c4aa56..4704f2ee5797 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -892,10 +892,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host_data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(host_data->base)) {
- dev_err(dev, "Unable to map registers\n");
+ if (IS_ERR(host_data->base))
return PTR_ERR(host_data->base);
- }
for (i = 0; i < drv_data->bank_nr; i++)
stm32_exti_chip_init(host_data, i, np);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 70061991915a..cd5642cef01f 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -46,7 +46,7 @@ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
static int hfcsusb_setup_bch(struct bchannel *bch, int protocol);
static void deactivate_bchannel(struct bchannel *bch);
-static void hfcsusb_ph_info(struct hfcsusb *hw);
+static int hfcsusb_ph_info(struct hfcsusb *hw);
/* start next background transfer for control channel */
static void
@@ -241,7 +241,7 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
* send full D/B channel status information
* as MPH_INFORMATION_IND
*/
-static void
+static int
hfcsusb_ph_info(struct hfcsusb *hw)
{
struct ph_info *phi;
@@ -250,7 +250,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
if (!phi)
- return;
+ return -ENOMEM;
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
@@ -263,6 +263,8 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
kfree(phi);
+
+ return 0;
}
/*
@@ -347,8 +349,7 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
ret = l1_event(dch->l1, hh->prim);
break;
case MPH_INFORMATION_REQ:
- hfcsusb_ph_info(hw);
- ret = 0;
+ ret = hfcsusb_ph_info(hw);
break;
}
@@ -403,8 +404,7 @@ hfc_l1callback(struct dchannel *dch, u_int cmd)
hw->name, __func__, cmd);
return -1;
}
- hfcsusb_ph_info(hw);
- return 0;
+ return hfcsusb_ph_info(hw);
}
static int
@@ -746,8 +746,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
LED_B2_OFF);
}
- hfcsusb_ph_info(hw);
- return 0;
+ return hfcsusb_ph_info(hw);
}
static void
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index a16c7a2a7f3d..88d592bafdb0 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -630,17 +630,19 @@ static void
release_io(struct inf_hw *hw)
{
if (hw->cfg.mode) {
- if (hw->cfg.p) {
+ if (hw->cfg.mode == AM_MEMIO) {
release_mem_region(hw->cfg.start, hw->cfg.size);
- iounmap(hw->cfg.p);
+ if (hw->cfg.p)
+ iounmap(hw->cfg.p);
} else
release_region(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = AM_NONE;
}
if (hw->addr.mode) {
- if (hw->addr.p) {
+ if (hw->addr.mode == AM_MEMIO) {
release_mem_region(hw->addr.start, hw->addr.size);
- iounmap(hw->addr.p);
+ if (hw->addr.p)
+ iounmap(hw->addr.p);
} else
release_region(hw->addr.start, hw->addr.size);
hw->addr.mode = AM_NONE;
@@ -670,9 +672,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->cfg.start, (ulong)hw->cfg.size);
return err;
}
- if (hw->ci->cfg_mode == AM_MEMIO)
- hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = hw->ci->cfg_mode;
+ if (hw->ci->cfg_mode == AM_MEMIO) {
+ hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+ if (!hw->cfg.p)
+ return -ENOMEM;
+ }
if (debug & DEBUG_HW)
pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->cfg.start,
@@ -697,12 +702,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
+ hw->addr.mode = hw->ci->addr_mode;
if (hw->ci->addr_mode == AM_MEMIO) {
hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
- if (unlikely(!hw->addr.p))
+ if (!hw->addr.p)
return -ENOMEM;
}
- hw->addr.mode = hw->ci->addr_mode;
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->addr.start,
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index ee925b58bbce..2a1ddd47a096 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1100,7 +1100,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
card->typ = NETJET_S_TJ300;
card->base = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fc433e63b1dc..b1590cb4a188 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
usleep_range(3000, 6000);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
- return ret;
+ goto out;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0a4551e165ab..5fc989a6d452 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -364,7 +364,6 @@ struct cached_dev {
/* The rest of this all shows up in sysfs */
unsigned int sequential_cutoff;
- unsigned int readahead;
unsigned int io_disable:1;
unsigned int verify:1;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 29c231758293..6d1de889baeb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned int sectors)
{
int ret = MAP_CONTINUE;
- unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ unsigned int size_limit;
s->cache_missed = 1;
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_opf & REQ_RAHEAD) &&
- !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
- s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
- reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_bdev->bd_disk) -
- bio_end_sector(bio));
-
- s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+ /* Limitation for valid replace key size and cache_bio bvecs number */
+ size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+ (1 << KEY_SIZE_BITS) - 1);
+ s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
s->iop.replace_key = KEY(s->iop.inode,
bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+ miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+ &s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- if (reada)
- bch_mark_cache_readahead(s->iop.c, s->d);
-
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 503aafe188dc..4c7ee5fedb9d 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);
@@ -64,7 +63,6 @@ SHOW(bch_stats)
DIV_SAFE(var(cache_hits) * 100,
var(cache_hits) + var(cache_misses)));
- var_print(cache_readaheads);
var_print(cache_miss_collisions);
sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
&sysfs_cache_bypass_hits,
&sysfs_cache_bypass_misses,
&sysfs_cache_hit_ratio,
- &sysfs_cache_readaheads,
&sysfs_cache_miss_collisions,
&sysfs_bypassed,
NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
acc->total.cache_misses = 0;
acc->total.cache_bypass_hits = 0;
acc->total.cache_bypass_misses = 0;
- acc->total.cache_readaheads = 0;
acc->total.cache_miss_collisions = 0;
acc->total.sectors_bypassed = 0;
}
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
scale_stat(&stats->cache_misses);
scale_stat(&stats->cache_bypass_hits);
scale_stat(&stats->cache_bypass_misses);
- scale_stat(&stats->cache_readaheads);
scale_stat(&stats->cache_miss_collisions);
scale_stat(&stats->sectors_bypassed);
}
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
move_stat(cache_misses);
move_stat(cache_bypass_hits);
move_stat(cache_bypass_misses);
- move_stat(cache_readaheads);
move_stat(cache_miss_collisions);
move_stat(sectors_bypassed);
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
mark_cache_stats(&c->accounting.collector, hit, bypass);
}
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
- atomic_inc(&dc->accounting.collector.cache_readaheads);
- atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index abfaabf7e7fc..ca4f435f7216 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -7,7 +7,6 @@ struct cache_stat_collector {
atomic_t cache_misses;
atomic_t cache_bypass_hits;
atomic_t cache_bypass_misses;
- atomic_t cache_readaheads;
atomic_t cache_miss_collisions;
atomic_t sectors_bypassed;
};
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index cc89f3156d1a..05ac1d6fbbf3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
-rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
var_printf(partial_stripes_expensive, "%u");
var_hprint(sequential_cutoff);
- var_hprint(readahead);
sysfs_print(running, atomic_read(&dc->running));
sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(sequential_cutoff,
dc->sequential_cutoff,
0, UINT_MAX);
- d_strtoi_h(readahead);
if (attr == &sysfs_clear_stats)
bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_running,
&sysfs_state,
&sysfs_label,
- &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
&sysfs_bypass_torture_test,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 781942aeddd1..20f2510db1f6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -66,14 +66,14 @@ struct superblock {
__u8 magic[8];
__u8 version;
__u8 log2_interleave_sectors;
- __u16 integrity_tag_size;
- __u32 journal_sections;
- __u64 provided_data_sectors; /* userspace uses this value */
- __u32 flags;
+ __le16 integrity_tag_size;
+ __le32 journal_sections;
+ __le64 provided_data_sectors; /* userspace uses this value */
+ __le32 flags;
__u8 log2_sectors_per_block;
__u8 log2_blocks_per_bitmap_bit;
__u8 pad[2];
- __u64 recalc_sector;
+ __le64 recalc_sector;
__u8 pad2[8];
__u8 salt[SALT_SIZE];
};
@@ -86,16 +86,16 @@ struct superblock {
#define JOURNAL_ENTRY_ROUNDUP 8
-typedef __u64 commit_id_t;
+typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8
struct journal_entry {
union {
struct {
- __u32 sector_lo;
- __u32 sector_hi;
+ __le32 sector_lo;
+ __le32 sector_hi;
} s;
- __u64 sector;
+ __le64 sector;
} u;
commit_id_t last_bytes[];
/* __u8 tag[0]; */
@@ -806,7 +806,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
- uint64_t section_le;
+ __le64 section_le;
r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
if (unlikely(r < 0)) {
@@ -1640,7 +1640,7 @@ static void integrity_end_io(struct bio *bio)
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
const char *data, char *result)
{
- __u64 sector_le = cpu_to_le64(sector);
+ __le64 sector_le = cpu_to_le64(sector);
SHASH_DESC_ON_STACK(req, ic->internal_hash);
int r;
unsigned digest_size;
@@ -2689,30 +2689,26 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- if (!ic->discard) {
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
- io_req.mem.type = DM_IO_VMA;
- io_req.mem.ptr.addr = ic->recalc_buffer;
- io_req.notify.fn = NULL;
- io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
- io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = n_sectors;
+ io_req.bi_op = REQ_OP_READ;
+ io_req.bi_op_flags = 0;
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.addr = ic->recalc_buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
- if (unlikely(r)) {
- dm_integrity_io_error(ic, "reading data", r);
- goto err;
- }
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
- t = ic->recalc_tags;
- for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
- t += ic->tag_size;
- }
- } else {
- t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ t = ic->recalc_tags;
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tag_size;
}
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
@@ -3826,7 +3822,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
for (i = 0; i < ic->journal_sections; i++) {
struct scatterlist sg;
struct skcipher_request *section_req;
- __u32 section_le = cpu_to_le32(i);
+ __le32 section_le = cpu_to_le32(i);
memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
@@ -4368,13 +4364,11 @@ try_smaller_buffer:
goto bad;
}
INIT_WORK(&ic->recalc_work, integrity_recalc);
- if (!ic->discard) {
- ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
- if (!ic->recalc_buffer) {
- ti->error = "Cannot allocate buffer for recalculating";
- r = -ENOMEM;
- goto bad;
- }
+ ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+ if (!ic->recalc_buffer) {
+ ti->error = "Cannot allocate buffer for recalculating";
+ r = -ENOMEM;
+ goto bad;
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
@@ -4383,9 +4377,6 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- if (ic->discard)
- memset(ic->recalc_tags, DISCARD_FILLER,
- (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4579,7 +4570,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
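
The __u64 to __le64 changes above are sparse endianness annotations: on-disk fields keep a fixed little-endian layout and are only touched through the byte-order helpers, so mixing them with native-endian integers becomes a reportable error. A small, hypothetical sketch of the convention:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_ondisk {
		__le64 sector;			/* stored little-endian on disk */
	};

	static void example_set_sector(struct example_ondisk *d, u64 sector)
	{
		d->sector = cpu_to_le64(sector);	/* convert once, at the boundary */
	}

	static u64 example_get_sector(const struct example_ondisk *d)
	{
		return le64_to_cpu(d->sector);
	}
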
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2acb014c13a..751ec5ea1dbb 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -855,7 +855,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
- unsigned chunk_size = 0;
+ unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
@@ -1409,6 +1409,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
+ r = -EINVAL;
goto bad_read_metadata;
}
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index 29385dc470d5..db61a1f43ae9 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -15,7 +15,7 @@
#define DM_VERITY_VERIFY_ERR(s) DM_VERITY_ROOT_HASH_VERIFICATION " " s
static bool require_signatures;
-module_param(require_signatures, bool, false);
+module_param(require_signatures, bool, 0444);
MODULE_PARM_DESC(require_signatures,
"Verify the roothash of dm-verity hash tree");
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 841e1c1aa5e6..7d4ff8a5c55e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
- WARN_ON_ONCE(bio->bi_bdev->bd_partno);
-
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 655db8272268..9767159aeb9b 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -281,7 +281,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
// read status reg in order to clear pending irqs
err = sp8870_readreg(state, 0x200);
- if (err)
+ if (err < 0)
return err;
// system controller start
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 83bd9a412a56..1e3b68a8743a 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index a4f7431486f3..d93d384286c1 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -1424,7 +1424,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
- int ret;
sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
reset_camera_params(gspca_dev);
@@ -1436,10 +1435,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mode;
cam->nmodes = ARRAY_SIZE(mode);
- ret = goto_low_power(gspca_dev);
- if (ret)
- gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
- ret);
+ goto_low_power(gspca_dev);
/* Check the firmware version. */
sd->params.version.firmwareVersion = 0;
get_version_information(gspca_dev);
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index bfa3b381d8a2..bf1af6ed9131 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -195,7 +195,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
int mt9m111_probe(struct sd *sd)
{
u8 data[2] = {0x00, 0x00};
- int i, rc = 0;
+ int i, err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -213,18 +213,18 @@ int mt9m111_probe(struct sd *sd)
/* Do the preinit */
for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
if (preinit_mt9m111[i][0] == BRIDGE) {
- rc |= m5602_write_bridge(sd,
- preinit_mt9m111[i][1],
- preinit_mt9m111[i][2]);
+ err = m5602_write_bridge(sd,
+ preinit_mt9m111[i][1],
+ preinit_mt9m111[i][2]);
} else {
data[0] = preinit_mt9m111[i][2];
data[1] = preinit_mt9m111[i][3];
- rc |= m5602_write_sensor(sd,
- preinit_mt9m111[i][1], data, 2);
+ err = m5602_write_sensor(sd,
+ preinit_mt9m111[i][1], data, 2);
}
+ if (err < 0)
+ return err;
}
- if (rc < 0)
- return rc;
if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
return -ENODEV;
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
index d680b777f097..8fd99ceee4b6 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
@@ -154,8 +154,8 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
int po1030_probe(struct sd *sd)
{
- int rc = 0;
u8 dev_id_h = 0, i;
+ int err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -174,14 +174,14 @@ int po1030_probe(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
u8 data = preinit_po1030[i][2];
if (preinit_po1030[i][0] == SENSOR)
- rc |= m5602_write_sensor(sd,
- preinit_po1030[i][1], &data, 1);
+ err = m5602_write_sensor(sd, preinit_po1030[i][1],
+ &data, 1);
else
- rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
- data);
+ err = m5602_write_bridge(sd, preinit_po1030[i][1],
+ data);
+ if (err < 0)
+ return err;
}
- if (rc < 0)
- return rc;
if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
return -ENODEV;
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 8004dd64d09a..d971acd98236 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -129,7 +129,7 @@ static int msb_sg_compare_to_buffer(struct scatterlist *sg,
* Each zone consists of 512 eraseblocks, out of which in first
* zone 494 are used and 496 are for all following zones.
* Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
-*/
+ */
static int msb_get_zone_from_lba(int lba)
{
if (lba < 494)
@@ -348,8 +348,9 @@ again:
switch (msb->state) {
case MSB_RP_SEND_BLOCK_ADDRESS:
/* msb_write_regs sometimes "fails" because it needs to update
- the reg window, and thus it returns request for that.
- Then we stay in this state and retry */
+ * the reg window, and thus it returns a request for that.
+ * Then we stay in this state and retry
+ */
if (!msb_write_regs(msb,
offsetof(struct ms_register, param),
sizeof(struct ms_param_register),
@@ -368,7 +369,8 @@ again:
case MSB_RP_SEND_INT_REQ:
msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
/* If we don't actually need to send the int read request (only in
- serial mode), then just fall through */
+ * serial mode), then just fall through
+ */
if (msb_read_int_reg(msb, -1))
return 0;
fallthrough;
@@ -702,7 +704,8 @@ static int h_msb_parallel_switch(struct memstick_dev *card,
case MSB_PS_SWICH_HOST:
/* Set parallel interface on our side + send a dummy request
- to see if card responds */
+ * to see if card responds
+ */
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
msb->state = MSB_PS_CONFIRM;
@@ -821,6 +824,7 @@ static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
int error, try;
+
if (msb->read_only)
return -EROFS;
@@ -997,6 +1001,7 @@ static int msb_write_block(struct msb_data *msb,
u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
int error, current_try = 1;
+
BUG_ON(sg->length < msb->page_size);
if (msb->read_only)
@@ -1045,11 +1050,12 @@ static int msb_write_block(struct msb_data *msb,
error = msb_run_state_machine(msb, h_msb_write_block);
/* Sector we just wrote to is assumed erased since its pba
- was erased. If it wasn't erased, write will succeed
- and will just clear the bits that were set in the block
- thus test that what we have written,
- matches what we expect.
- We do trust the blocks that we erased */
+ * was erased. If it wasn't erased, write will succeed
+ * and will just clear the bits that were set in the block
+ * thus we test that what we have written
+ * matches what we expect.
+ * We do trust the blocks that we erased
+ */
if (!error && (verify_writes ||
!test_bit(pba, msb->erased_blocks_bitmap)))
error = msb_verify_block(msb, pba, sg, offset);
@@ -1493,6 +1499,7 @@ static int msb_ftl_scan(struct msb_data *msb)
static void msb_cache_flush_timer(struct timer_list *t)
{
struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
+
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
}
@@ -1673,7 +1680,8 @@ static int msb_cache_read(struct msb_data *msb, int lba,
 * This table content isn't that important,
 * One could put here different values, provided that they still
 * cover the whole disk.
- * 64 MB entry is what windows reports for my 64M memstick */
+ * 64 MB entry is what Windows reports for my 64M memstick
+ */
static const struct chs_entry chs_table[] = {
/* size sectors cylinders heads */
@@ -1706,8 +1714,9 @@ static int msb_init_card(struct memstick_dev *card)
return error;
/* Due to a bug in Jmicron driver written by Alex Dubov,
- its serial mode barely works,
- so we switch to parallel mode right away */
+ * its serial mode barely works,
+ * so we switch to parallel mode right away
+ */
if (host->caps & MEMSTICK_CAP_PAR4)
msb_switch_to_parallel(msb);
@@ -2033,6 +2042,7 @@ static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
static int msb_check_card(struct memstick_dev *card)
{
struct msb_data *msb = memstick_get_drvdata(card);
+
return (msb->card_dead == 0);
}
@@ -2333,6 +2343,7 @@ static struct memstick_driver msb_driver = {
static int __init msb_init(void)
{
int rc = memstick_register_driver(&msb_driver);
+
if (rc)
pr_err("failed to register memstick driver (error %d)\n", rc);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 102dbb8080da..29271ad4728a 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -799,9 +799,9 @@ static int rtsx_usb_ms_drv_probe(struct platform_device *pdev)
return 0;
err_out:
- memstick_free_host(msh);
pm_runtime_disable(ms_dev(host));
pm_runtime_put_noidle(ms_dev(host));
+ memstick_free_host(msh);
return err;
}
@@ -828,9 +828,6 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
}
mutex_unlock(&host->host_mutex);
- memstick_remove_host(msh);
- memstick_free_host(msh);
-
/* Balance possible unbalanced usage count
* e.g. unconditional module removal
*/
@@ -838,10 +835,11 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
pm_runtime_put(ms_dev(host));
pm_runtime_disable(ms_dev(host));
- platform_set_drvdata(pdev, NULL);
-
+ memstick_remove_host(msh);
dev_dbg(ms_dev(host),
": Realtek USB Memstick controller has been removed\n");
+ memstick_free_host(msh);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index db734f2831ff..83f3ffbdbb4c 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -5,6 +5,8 @@
#include <linux/interrupt.h>
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6359/core.h>
+#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -13,7 +15,9 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-static struct irq_top_t mt6358_ints[] = {
+#define MTK_PMIC_REG_WIDTH 16
+
+static const struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(BUCK),
MT6358_TOP_GEN(LDO),
MT6358_TOP_GEN(PSC),
@@ -24,6 +28,31 @@ static struct irq_top_t mt6358_ints[] = {
MT6358_TOP_GEN(MISC),
};
+static const struct irq_top_t mt6359_ints[] = {
+ MT6359_TOP_GEN(BUCK),
+ MT6359_TOP_GEN(LDO),
+ MT6359_TOP_GEN(PSC),
+ MT6359_TOP_GEN(SCK),
+ MT6359_TOP_GEN(BM),
+ MT6359_TOP_GEN(HK),
+ MT6359_TOP_GEN(AUD),
+ MT6359_TOP_GEN(MISC),
+};
+
+static struct pmic_irq_data mt6358_irqd = {
+ .num_top = ARRAY_SIZE(mt6358_ints),
+ .num_pmic_irqs = MT6358_IRQ_NR,
+ .top_int_status_reg = MT6358_TOP_INT_STATUS0,
+ .pmic_ints = mt6358_ints,
+};
+
+static struct pmic_irq_data mt6359_irqd = {
+ .num_top = ARRAY_SIZE(mt6359_ints),
+ .num_pmic_irqs = MT6359_IRQ_NR,
+ .top_int_status_reg = MT6359_TOP_INT_STATUS0,
+ .pmic_ints = mt6359_ints,
+};
+
static void pmic_irq_enable(struct irq_data *data)
{
unsigned int hwirq = irqd_to_hwirq(data);
@@ -62,15 +91,15 @@ static void pmic_irq_sync_unlock(struct irq_data *data)
/* Find out the IRQ group */
top_gp = 0;
while ((top_gp + 1) < irqd->num_top &&
- i >= mt6358_ints[top_gp + 1].hwirq_base)
+ i >= irqd->pmic_ints[top_gp + 1].hwirq_base)
top_gp++;
/* Find the IRQ registers */
- gp_offset = i - mt6358_ints[top_gp].hwirq_base;
- int_regs = gp_offset / MT6358_REG_WIDTH;
- shift = gp_offset % MT6358_REG_WIDTH;
- en_reg = mt6358_ints[top_gp].en_reg +
- (mt6358_ints[top_gp].en_reg_shift * int_regs);
+ gp_offset = i - irqd->pmic_ints[top_gp].hwirq_base;
+ int_regs = gp_offset / MTK_PMIC_REG_WIDTH;
+ shift = gp_offset % MTK_PMIC_REG_WIDTH;
+ en_reg = irqd->pmic_ints[top_gp].en_reg +
+ (irqd->pmic_ints[top_gp].en_reg_shift * int_regs);
regmap_update_bits(chip->regmap, en_reg, BIT(shift),
irqd->enable_hwirq[i] << shift);
@@ -95,10 +124,11 @@ static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
unsigned int irq_status, sta_reg, status;
unsigned int hwirq, virq;
int i, j, ret;
+ struct pmic_irq_data *irqd = chip->irq_data;
- for (i = 0; i < mt6358_ints[top_gp].num_int_regs; i++) {
- sta_reg = mt6358_ints[top_gp].sta_reg +
- mt6358_ints[top_gp].sta_reg_shift * i;
+ for (i = 0; i < irqd->pmic_ints[top_gp].num_int_regs; i++) {
+ sta_reg = irqd->pmic_ints[top_gp].sta_reg +
+ irqd->pmic_ints[top_gp].sta_reg_shift * i;
ret = regmap_read(chip->regmap, sta_reg, &irq_status);
if (ret) {
@@ -114,8 +144,8 @@ static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
do {
j = __ffs(status);
- hwirq = mt6358_ints[top_gp].hwirq_base +
- MT6358_REG_WIDTH * i + j;
+ hwirq = irqd->pmic_ints[top_gp].hwirq_base +
+ MTK_PMIC_REG_WIDTH * i + j;
virq = irq_find_mapping(chip->irq_domain, hwirq);
if (virq)
@@ -131,12 +161,12 @@ static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
static irqreturn_t mt6358_irq_handler(int irq, void *data)
{
struct mt6397_chip *chip = data;
- struct pmic_irq_data *mt6358_irq_data = chip->irq_data;
+ struct pmic_irq_data *irqd = chip->irq_data;
unsigned int bit, i, top_irq_status = 0;
int ret;
ret = regmap_read(chip->regmap,
- mt6358_irq_data->top_int_status_reg,
+ irqd->top_int_status_reg,
&top_irq_status);
if (ret) {
dev_err(chip->dev,
@@ -144,8 +174,8 @@ static irqreturn_t mt6358_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- for (i = 0; i < mt6358_irq_data->num_top; i++) {
- bit = BIT(mt6358_ints[i].top_offset);
+ for (i = 0; i < irqd->num_top; i++) {
+ bit = BIT(irqd->pmic_ints[i].top_offset);
if (top_irq_status & bit) {
mt6358_irq_sp_handler(chip, i);
top_irq_status &= ~bit;
@@ -180,17 +210,22 @@ int mt6358_irq_init(struct mt6397_chip *chip)
int i, j, ret;
struct pmic_irq_data *irqd;
- irqd = devm_kzalloc(chip->dev, sizeof(*irqd), GFP_KERNEL);
- if (!irqd)
- return -ENOMEM;
+ switch (chip->chip_id) {
+ case MT6358_CHIP_ID:
+ chip->irq_data = &mt6358_irqd;
+ break;
- chip->irq_data = irqd;
+ case MT6359_CHIP_ID:
+ chip->irq_data = &mt6359_irqd;
+ break;
- mutex_init(&chip->irqlock);
- irqd->top_int_status_reg = MT6358_TOP_INT_STATUS0;
- irqd->num_pmic_irqs = MT6358_IRQ_NR;
- irqd->num_top = ARRAY_SIZE(mt6358_ints);
+ default:
+ dev_err(chip->dev, "unsupported chip: 0x%x\n", chip->chip_id);
+ return -ENODEV;
+ }
+ mutex_init(&chip->irqlock);
+ irqd = chip->irq_data;
irqd->enable_hwirq = devm_kcalloc(chip->dev,
irqd->num_pmic_irqs,
sizeof(*irqd->enable_hwirq),
@@ -207,10 +242,10 @@ int mt6358_irq_init(struct mt6397_chip *chip)
/* Disable all interrupts for initializing */
for (i = 0; i < irqd->num_top; i++) {
- for (j = 0; j < mt6358_ints[i].num_int_regs; j++)
+ for (j = 0; j < irqd->pmic_ints[i].num_int_regs; j++)
regmap_write(chip->regmap,
- mt6358_ints[i].en_reg +
- mt6358_ints[i].en_reg_shift * j, 0);
+ irqd->pmic_ints[i].en_reg +
+ irqd->pmic_ints[i].en_reg_shift * j, 0);
}
chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 7518d74c3b4c..9a615f75fbde 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -13,9 +13,11 @@
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6358/core.h>
+#include <linux/mfd/mt6359/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/registers.h>
#define MT6323_RTC_BASE 0x8000
@@ -99,6 +101,17 @@ static const struct mfd_cell mt6358_devs[] = {
},
};
+static const struct mfd_cell mt6359_devs[] = {
+ { .name = "mt6359-regulator", },
+ {
+ .name = "mt6359-rtc",
+ .num_resources = ARRAY_SIZE(mt6358_rtc_resources),
+ .resources = mt6358_rtc_resources,
+ .of_compatible = "mediatek,mt6358-rtc",
+ },
+ { .name = "mt6359-sound", },
+};
+
static const struct mfd_cell mt6397_devs[] = {
{
.name = "mt6397-rtc",
@@ -149,6 +162,14 @@ static const struct chip_data mt6358_core = {
.irq_init = mt6358_irq_init,
};
+static const struct chip_data mt6359_core = {
+ .cid_addr = MT6359_SWCID,
+ .cid_shift = 8,
+ .cells = mt6359_devs,
+ .cell_size = ARRAY_SIZE(mt6359_devs),
+ .irq_init = mt6358_irq_init,
+};
+
static const struct chip_data mt6397_core = {
.cid_addr = MT6397_CID,
.cid_shift = 0,
@@ -219,6 +240,9 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6358",
.data = &mt6358_core,
}, {
+ .compatible = "mediatek,mt6359",
+ .data = &mt6359_core,
+ }, {
.compatible = "mediatek,mt6397",
.data = &mt6397_core,
}, {
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index a07674ed0596..4c5621b17a6f 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
pcr->ic_version = rtl8411_get_ic_version(pcr);
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 39a6a7ecc32e..29f5414072bf 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 8200af22b529..4bcfbc9afbac 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
rts5227_init_params(pcr);
pcr->ops = &rts522a_pcr_ops;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index 781a86def59a..ffc128278613 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 89e6f124ca5c..c748eaf1ec1f 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index b2676e7f5027..53f3a1f45c4a 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 080a7d67a8e1..9b42b20a3e5a 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 6c64dade8e1a..1fd4e0e50730 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_1v8 = 0x00;
pcr->sd30_drive_sel_3v3 = 0x00;
pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 273311184669..baf83594a01d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
if (pcr->aspm_enabled == enable)
return;
- if (pcr->aspm_en & 0x02)
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
- FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
- else
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
- FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ if (pcr->aspm_mode == ASPM_MODE_CFG) {
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ enable ? pcr->aspm_en : 0);
+ } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+ if (pcr->aspm_en & 0x02)
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+ FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ else
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+ FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+ }
if (!enable && (pcr->aspm_en & 0x02))
mdelay(10);
@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+ if (pcr->aspm_mode == ASPM_MODE_REG)
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
int err;
+ u16 cfg_val;
+ u8 val;
spin_lock_init(&pcr->lock);
mutex_init(&pcr->pcr_mutex);
@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
if (!pcr->slots)
return -ENOMEM;
+ if (pcr->aspm_mode == ASPM_MODE_CFG) {
+ pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
+ if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
+ pcr->aspm_enabled = true;
+ else
+ pcr->aspm_enabled = false;
+
+ } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+ rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+ if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+ pcr->aspm_enabled = false;
+ else
+ pcr->aspm_enabled = true;
+ }
+
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
- u8 val;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
- rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
- if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
- pcr->aspm_enabled = false;
- else
- pcr->aspm_enabled = true;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
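For reference, a minimal standalone sketch of the config-space ASPM toggle that the new ASPM_MODE_CFG branch above relies on. This is illustrative only and not part of the patch; it assumes nothing beyond a valid struct pci_dev, and uses the same pcie_capability_clear_and_set_word()/PCI_EXP_LNKCTL_ASPMC names that already appear in the hunk.

#include <linux/pci.h>

/* Illustrative sketch, not from the patch: toggle ASPM by rewriting the
 * ASPM Control field of the Link Control register - clear both L0s/L1
 * bits, then set the requested ones again when enabling.
 */
static void example_set_aspm(struct pci_dev *pdev, u16 aspm_bits, bool enable)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   enable ? aspm_bits : 0);
}

The older readers keep the ASPM_MODE_REG path instead, which continues to force ASPM through the vendor ASPM_FORCE_CTL register rather than the PCIe capability.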
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 926408b41270..7a6f01ace78a 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
if (IS_ERR(at24->nvmem)) {
pm_runtime_disable(dev);
- regulator_disable(at24->vcc_reg);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
return PTR_ERR(at24->nvmem);
}
@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
err = at24_read(at24, 0, &test_byte, 1);
if (err) {
pm_runtime_disable(dev);
- regulator_disable(at24->vcc_reg);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
return -ENODEV;
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index ff8791a651fd..af3c497defb1 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -2017,7 +2017,7 @@ wait_again:
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
- timeout -= jiffies_to_usecs(completion_rc);
+ timeout = completion_rc;
goto wait_again;
}
} else {
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 832dd5c5bb06..0713b2c12d54 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -362,12 +362,9 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
}
if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
- dev_warn(hdev->dev,
+ dev_err(hdev->dev,
"Device boot warning - security not ready\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_SECURITY_NOT_RDY;
+ err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
@@ -403,7 +400,8 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
err_exists = true;
}
- if (err_exists)
+ if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
+ lower_32_bits(hdev->boot_error_status_mask)))
return -EIO;
return 0;
@@ -661,18 +659,13 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
return rc;
}
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 pll_byte, pll_bit_off;
bool dynamic_pll;
-
- if (input_pll_index >= PLL_MAX) {
- dev_err(hdev->dev, "PLL index %d is out of range\n",
- input_pll_index);
- return -EINVAL;
- }
+ int fw_pll_idx;
dynamic_pll = prop->fw_security_status_valid &&
(prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
@@ -680,28 +673,39 @@ int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
if (!dynamic_pll) {
/*
* in case we are working with legacy FW (each asic has unique
- * PLL numbering) extract the legacy numbering
+ * PLL numbering) use the driver based index as they are
+ * aligned with fw legacy numbering
*/
- *pll_index = hdev->legacy_pll_map[input_pll_index];
+ *pll_index = input_pll_index;
return 0;
}
+ /* retrieve a FW compatible PLL index based on
+ * ASIC specific user request
+ */
+ fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
+ if (fw_pll_idx < 0) {
+ dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
+ input_pll_index, fw_pll_idx);
+ return -EINVAL;
+ }
+
/* PLL map is a u8 array */
- pll_byte = prop->cpucp_info.pll_map[input_pll_index >> 3];
- pll_bit_off = input_pll_index & 0x7;
+ pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
+ pll_bit_off = fw_pll_idx & 0x7;
if (!(pll_byte & BIT(pll_bit_off))) {
dev_err(hdev->dev, "PLL index %d is not supported\n",
- input_pll_index);
+ fw_pll_idx);
return -EINVAL;
}
- *pll_index = input_pll_index;
+ *pll_index = fw_pll_idx;
return 0;
}
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr)
{
struct cpucp_packet pkt;
@@ -844,8 +848,13 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (rc) {
dev_err(hdev->dev, "Failed to read preboot version\n");
detect_cpu_boot_status(hdev, status);
- fw_read_errors(hdev, boot_err0_reg,
- cpu_security_boot_status_reg);
+
+ /* If we read all FF, then something is totally wrong, no point
+ * in reading specific errors
+ */
+ if (status != -1)
+ fw_read_errors(hdev, boot_err0_reg,
+ cpu_security_boot_status_reg);
return -EIO;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 44e89da30b4a..6579f8767abd 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -930,6 +930,9 @@ enum div_select_defs {
* driver is ready to receive asynchronous events. This
* function should be called during the first init and
* after every hard-reset of the device
+ * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
+ * @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
+ * generic f/w compatible PLL Indexes
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1054,6 +1057,7 @@ struct hl_asic_funcs {
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
void (*get_msi_info)(u32 *table);
+ int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
};
@@ -1950,8 +1954,6 @@ struct hl_mmu_funcs {
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
- * @legacy_pll_map: map holding map between dynamic (common) PLL indexes and
- * static (asic specific) PLL indexes.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1960,6 +1962,12 @@ struct hl_mmu_funcs {
* @clock_gating_mask: is clock gating enabled. bitmask that represents the
* different engines. See debugfs-driver-habanalabs for
* details.
+ * @boot_error_status_mask: contains a mask of the device boot error status.
+ * Each bit represents a different error, according to
+ * the defines in hl_boot_if.h. If the bit is cleared,
+ * the error will be ignored by the driver during
+ * device initialization. Mainly used to debug and
+ * workaround firmware bugs
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @card_type: Various ASICs have several card types. This indicates the card
@@ -2071,12 +2079,11 @@ struct hl_device {
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
- enum pll_index *legacy_pll_map;
-
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
+ u64 boot_error_status_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
enum cpucp_card_types card_type;
@@ -2387,9 +2394,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
u64 *total_energy);
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index);
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr);
int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
@@ -2411,9 +2418,9 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
bool curr);
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
u64 freq);
int hl_get_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long *value);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 7135f1e03864..64d1530db985 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -30,6 +30,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock);
static int timeout_locked = 30;
static int reset_on_lockup = 1;
static int memory_scrub = 1;
+static ulong boot_error_status_mask = ULONG_MAX;
module_param(timeout_locked, int, 0444);
MODULE_PARM_DESC(timeout_locked,
@@ -43,6 +44,10 @@ module_param(memory_scrub, int, 0444);
MODULE_PARM_DESC(memory_scrub,
"Scrub device memory in various states (0 = no, 1 = yes, default yes)");
+module_param(boot_error_status_mask, ulong, 0444);
+MODULE_PARM_DESC(boot_error_status_mask,
+ "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+
#define PCI_VENDOR_ID_HABANALABS 0x1da3
#define PCI_IDS_GOYA 0x0001
@@ -319,6 +324,8 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
hdev->memory_scrub = memory_scrub;
+ hdev->boot_error_status_mask = boot_error_status_mask;
+
hdev->pldm = 0;
set_driver_behavior_per_device(hdev);
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 9fa61573a89d..c9f649b31e3a 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,7 +9,7 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
bool curr)
{
struct cpucp_packet pkt;
@@ -44,7 +44,7 @@ long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
u64 freq)
{
struct cpucp_packet pkt;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index b751652f80a8..9e4a6bb3acd1 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -105,36 +105,6 @@
#define GAUDI_PLL_MAX 10
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum gaudi_pll_index {
- GAUDI_CPU_PLL = 0,
- GAUDI_PCI_PLL,
- GAUDI_SRAM_PLL,
- GAUDI_HBM_PLL,
- GAUDI_NIC_PLL,
- GAUDI_DMA_PLL,
- GAUDI_MESH_PLL,
- GAUDI_MME_PLL,
- GAUDI_TPC_PLL,
- GAUDI_IF_PLL,
-};
-
-static enum pll_index gaudi_pll_map[PLL_MAX] = {
- [CPU_PLL] = GAUDI_CPU_PLL,
- [PCI_PLL] = GAUDI_PCI_PLL,
- [SRAM_PLL] = GAUDI_SRAM_PLL,
- [HBM_PLL] = GAUDI_HBM_PLL,
- [NIC_PLL] = GAUDI_NIC_PLL,
- [DMA_PLL] = GAUDI_DMA_PLL,
- [MESH_PLL] = GAUDI_MESH_PLL,
- [MME_PLL] = GAUDI_MME_PLL,
- [TPC_PLL] = GAUDI_TPC_PLL,
- [IF_PLL] = GAUDI_IF_PLL,
-};
-
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -810,7 +780,7 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
freq = 0;
}
} else {
- rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
if (rc)
return rc;
@@ -1652,9 +1622,6 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->asic_specific = gaudi;
- /* store legacy PLL map */
- hdev->legacy_pll_map = gaudi_pll_map;
-
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5612,6 +5579,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
struct hl_cs_job *job;
u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
+ u64 id;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5678,8 +5646,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
}
release_cb:
+ id = cb->id;
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
return rc;
}
@@ -8783,6 +8752,23 @@ static void gaudi_enable_events_from_fw(struct hl_device *hdev)
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
}
+static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GAUDI_CPU_PLL: return CPU_PLL;
+ case HL_GAUDI_PCI_PLL: return PCI_PLL;
+ case HL_GAUDI_NIC_PLL: return NIC_PLL;
+ case HL_GAUDI_DMA_PLL: return DMA_PLL;
+ case HL_GAUDI_MESH_PLL: return MESH_PLL;
+ case HL_GAUDI_MME_PLL: return MME_PLL;
+ case HL_GAUDI_TPC_PLL: return TPC_PLL;
+ case HL_GAUDI_IF_PLL: return IF_PLL;
+ case HL_GAUDI_SRAM_PLL: return SRAM_PLL;
+ case HL_GAUDI_HBM_PLL: return HBM_PLL;
+ default: return -EINVAL;
+ }
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8866,7 +8852,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
.ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
.get_hw_block_id = gaudi_get_hw_block_id,
.hw_block_mmap = gaudi_block_mmap,
- .enable_events_from_fw = gaudi_enable_events_from_fw
+ .enable_events_from_fw = gaudi_enable_events_from_fw,
+ .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 8c49da4bcbd5..9b60eadd4c35 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -13,7 +13,7 @@ void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
struct gaudi_device *gaudi = hdev->asic_specific;
if (freq == PLL_LAST)
- hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
}
int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
@@ -23,7 +23,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -33,7 +33,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -57,7 +57,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
gaudi->max_freq_value = value;
@@ -85,7 +85,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
gaudi->max_freq_value = value * 1000 * 1000;
- hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
fail:
return count;
@@ -100,7 +100,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index e27338f4aad2..e0ad2a269779 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -118,30 +118,6 @@
#define IS_MME_IDLE(mme_arch_sts) \
(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum goya_pll_index {
- GOYA_CPU_PLL = 0,
- GOYA_IC_PLL,
- GOYA_MC_PLL,
- GOYA_MME_PLL,
- GOYA_PCI_PLL,
- GOYA_EMMC_PLL,
- GOYA_TPC_PLL,
-};
-
-static enum pll_index goya_pll_map[PLL_MAX] = {
- [CPU_PLL] = GOYA_CPU_PLL,
- [IC_PLL] = GOYA_IC_PLL,
- [MC_PLL] = GOYA_MC_PLL,
- [MME_PLL] = GOYA_MME_PLL,
- [PCI_PLL] = GOYA_PCI_PLL,
- [EMMC_PLL] = GOYA_EMMC_PLL,
- [TPC_PLL] = GOYA_TPC_PLL,
-};
-
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
"goya cq 4", "goya cpu eq"
@@ -775,7 +751,8 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
freq = 0;
}
} else {
- rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
+ pll_freq_arr);
if (rc)
return;
@@ -897,9 +874,6 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->asic_specific = goya;
- /* store legacy PLL map */
- hdev->legacy_pll_map = goya_pll_map;
-
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5512,6 +5486,20 @@ static void goya_enable_events_from_fw(struct hl_device *hdev)
GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
}
+static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GOYA_CPU_PLL: return CPU_PLL;
+ case HL_GOYA_PCI_PLL: return PCI_PLL;
+ case HL_GOYA_MME_PLL: return MME_PLL;
+ case HL_GOYA_TPC_PLL: return TPC_PLL;
+ case HL_GOYA_IC_PLL: return IC_PLL;
+ case HL_GOYA_MC_PLL: return MC_PLL;
+ case HL_GOYA_EMMC_PLL: return EMMC_PLL;
+ default: return -EINVAL;
+ }
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5595,7 +5583,8 @@ static const struct hl_asic_funcs goya_funcs = {
.ack_protection_bits_errors = goya_ack_protection_bits_errors,
.get_hw_block_id = goya_get_hw_block_id,
.hw_block_mmap = goya_block_mmap,
- .enable_events_from_fw = goya_enable_events_from_fw
+ .enable_events_from_fw = goya_enable_events_from_fw,
+ .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx
};
/*
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 3acb36a1a902..7d007125727f 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -13,19 +13,19 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
switch (freq) {
case PLL_HIGH:
- hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
- hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
- hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, hdev->high_pll);
break;
case PLL_LOW:
- hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
- hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
- hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, GOYA_PLL_FREQ_LOW);
break;
case PLL_LAST:
- hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
- hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
- hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, goya->mme_clk);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, goya->tpc_clk);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, goya->ic_clk);
break;
default:
dev_err(hdev->dev, "unknown frequency setting\n");
@@ -39,7 +39,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -49,7 +49,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -72,7 +72,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0)
return value;
@@ -105,7 +105,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, MME_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, value);
goya->mme_clk = value;
fail:
@@ -121,7 +121,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, TPC_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, false);
if (value < 0)
return value;
@@ -154,7 +154,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, TPC_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, value);
goya->tpc_clk = value;
fail:
@@ -170,7 +170,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, IC_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, false);
if (value < 0)
return value;
@@ -203,7 +203,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, IC_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, value);
goya->ic_clk = value;
fail:
@@ -219,7 +219,7 @@ static ssize_t mme_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0)
return value;
@@ -236,7 +236,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, TPC_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, true);
if (value < 0)
return value;
@@ -253,7 +253,7 @@ static ssize_t ic_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, IC_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, true);
if (value < 0)
return value;
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 2bdf560ee681..0f9ea75b0b18 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
if (temp < 0)
- data->regs[regs_to_copy[i]] = 0;
+ temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 64d33e368509..67c5b452dd35 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -101,8 +101,9 @@
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
- if (verbose > 1) \
+ if (verbose > 1) { \
printk(KERN_INFO a); \
+ } \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index c394c0b08519..7ac788fae1b8 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -271,6 +271,7 @@ struct lis3lv02d {
int regs_size;
u8 *reg_cache;
bool regs_stored;
+ bool init_required;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index a98f6b895af7..aab3ebfa9fc4 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_request_autosuspend(dev->dev);
+
list_move_tail(&cb->list, &cl->rd_pending);
return 0;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 689eb9afeeed..88f4c215caa6 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for media (i.e., flash cards)
*
@@ -1004,6 +1005,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+ break;
+ }
+ fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
@@ -1014,6 +1021,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
/* Always switch back to main area after RPMB access */
if (rpmb_ioctl)
mmc_blk_part_switch(card, 0);
+ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ mmc_cmdq_enable(card);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1159,7 +1168,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
int ret = 0;
- ret = mmc_flush_cache(card);
+ ret = mmc_flush_cache(card->host);
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f194940c5974..b039dcff17f8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1582,7 +1582,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
{
struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
- bool use_r1b_resp = false;
+ bool use_r1b_resp;
int err;
mmc_retune_hold(card->host);
@@ -1650,23 +1650,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
busy_timeout = mmc_erase_timeout(card, arg, qty);
- /*
- * If the host controller supports busy signalling and the timeout for
- * the erase operation does not exceed the max_busy_timeout, we should
- * use R1B response. Or we need to prevent the host from doing hw busy
- * detection, which is done by converting to a R1 response instead.
- * Note, some hosts requires R1B, which also means they are on their own
- * when it comes to deal with the busy timeout.
- */
- if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
- card->host->max_busy_timeout &&
- busy_timeout > card->host->max_busy_timeout) {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = busy_timeout;
- use_r1b_resp = true;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
@@ -1687,7 +1671,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
goto out;
/* Let's poll to find out when the erase operation completes. */
- err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
+ err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
out:
mmc_retune_release(card->host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index db3c9c68875d..0c4de2030b3f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -30,6 +30,7 @@ struct mmc_bus_ops {
int (*hw_reset)(struct mmc_host *);
int (*sw_reset)(struct mmc_host *);
bool (*cache_enabled)(struct mmc_host *);
+ int (*flush_cache)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -172,4 +173,12 @@ static inline bool mmc_cache_enabled(struct mmc_host *host)
return false;
}
+static inline int mmc_flush_cache(struct mmc_host *host)
+{
+ if (host->bus_ops->flush_cache)
+ return host->bus_ops->flush_cache(host);
+
+ return 0;
+}
+
#endif
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9ec84c86c46a..3fdbc801e64a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -26,6 +26,7 @@
static DECLARE_FAULT_ATTR(fail_default_attr);
static char *fail_request;
module_param(fail_request, charp, 0);
+MODULE_PARM_DESC(fail_request, "default fault injection attributes");
#endif /* CONFIG_FAIL_MMC_REQUEST */
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0b0577990ddc..eda4a1892c33 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -388,6 +388,9 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_NO_SD;
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ if (device_property_read_bool(dev, "no-mmc-hs400"))
+ host->caps2 &= ~(MMC_CAP2_HS400_1_8V | MMC_CAP2_HS400_1_2V |
+ MMC_CAP2_HS400_ES);
/* Must be after "non-removable" check */
if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 8674c3e0c02c..838726b68ff3 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -28,6 +28,7 @@
#define DEFAULT_CMD6_TIMEOUT_MS 500
#define MIN_CACHE_EN_TIMEOUT_MS 1600
+#define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
@@ -1905,11 +1906,20 @@ static int mmc_can_sleep(struct mmc_card *card)
return card->ext_csd.rev >= 3;
}
+static int mmc_sleep_busy_cb(void *cb_data, bool *busy)
+{
+ struct mmc_host *host = cb_data;
+
+ *busy = host->ops->card_busy(host);
+ return 0;
+}
+
static int mmc_sleep(struct mmc_host *host)
{
struct mmc_command cmd = {};
struct mmc_card *card = host->card;
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ bool use_r1b_resp;
int err;
/* Re-tuning can't be done once the card is deselected */
@@ -1922,35 +1932,27 @@ static int mmc_sleep(struct mmc_host *host)
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
cmd.arg |= 1 << 15;
-
- /*
- * If the max_busy_timeout of the host is specified, validate it against
- * the sleep cmd timeout. A failure means we need to prevent the host
- * from doing hw busy detection, which is done by converting to a R1
- * response instead of a R1B. Note, some hosts requires R1B, which also
- * means they are on their own when it comes to deal with the busy
- * timeout.
- */
- if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout)) {
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto out_release;
/*
- * If the host does not wait while the card signals busy, then we will
- * will have to wait the sleep/awake timeout. Note, we cannot use the
- * SEND_STATUS command to poll the status because that command (and most
- * others) is invalid while the card sleeps.
+ * If the host does not wait while the card signals busy, then we can
+ * try to poll, but only if the host supports HW polling, as the
+ * SEND_STATUS cmd is not allowed. If we can't poll, then we simply need
+ * to wait the sleep/awake timeout.
*/
- if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+ goto out_release;
+
+ if (!host->ops->card_busy) {
mmc_delay(timeout_ms);
+ goto out_release;
+ }
+
+ err = __mmc_poll_for_busy(card, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
@@ -2035,6 +2037,25 @@ static bool _mmc_cache_enabled(struct mmc_host *host)
host->card->ext_csd.cache_ctrl & 1;
}
+/*
+ * Flush the internal cache of the eMMC to non-volatile storage.
+ */
+static int _mmc_flush_cache(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (_mmc_cache_enabled(host)) {
+ err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(host), err);
+ }
+
+ return err;
+}
+
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
@@ -2046,7 +2067,7 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
if (mmc_card_suspended(host->card))
goto out;
- err = mmc_flush_cache(host->card);
+ err = _mmc_flush_cache(host);
if (err)
goto out;
@@ -2187,7 +2208,7 @@ static int _mmc_hw_reset(struct mmc_host *host)
* In the case of recovery, we can't expect flushing the cache to work
* always, but we have a go and ignore errors.
*/
- mmc_flush_cache(host->card);
+ _mmc_flush_cache(host);
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
mmc_can_reset(card)) {
@@ -2215,6 +2236,7 @@ static const struct mmc_bus_ops mmc_ops = {
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
+ .flush_cache = _mmc_flush_cache,
};
/*
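For reference, a minimal sketch of the callback-based busy polling that the mmc core hunks in this diff convert to (mmc_sleep_busy_cb above is the in-tree example; the names below are hypothetical and this is not part of the patch). The caller supplies a predicate that reports whether the card is still busy, and __mmc_poll_for_busy() loops on it until it clears or timeout_ms expires.

#include <linux/mmc/host.h>

/* Illustrative sketch, not from the patch: report busy state by asking the
 * controller directly instead of issuing SEND_STATUS.
 */
static int example_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_host *host = cb_data;

	*busy = host->ops->card_busy(host);
	return 0;
}

/* With a valid card, its host and a timeout in milliseconds:
 *	err = __mmc_poll_for_busy(card, timeout_ms, &example_busy_cb, host);
 */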
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 5756781fef37..973756ed4016 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -20,7 +20,6 @@
#include "mmc_ops.h"
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
-#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
static const u8 tuning_blk_pattern_4bit[] = {
@@ -53,6 +52,12 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
+struct mmc_busy_data {
+ struct mmc_card *card;
+ bool retry_crc_err;
+ enum mmc_busy_cmd busy_cmd;
+};
+
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
@@ -246,9 +251,8 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
* NOTE: void *buf, caller for the buf is required to use DMA-capable
* buffer or on-stack buffer (with some overhead in callee).
*/
-static int
-mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
- u32 opcode, void *buf, unsigned len)
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
@@ -259,7 +263,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
mrq.data = &data;
cmd.opcode = opcode;
- cmd.arg = 0;
+ cmd.arg = args;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
@@ -305,7 +309,7 @@ static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
if (!cxd_tmp)
return -ENOMEM;
- ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
+ ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
if (ret)
goto err;
@@ -353,7 +357,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!ext_csd)
return -ENOMEM;
- err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+ err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
512);
if (err)
kfree(ext_csd);
@@ -424,10 +428,10 @@ int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
return mmc_switch_status_error(card->host, status);
}
-static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
- enum mmc_busy_cmd busy_cmd, bool *busy)
+static int mmc_busy_cb(void *cb_data, bool *busy)
{
- struct mmc_host *host = card->host;
+ struct mmc_busy_data *data = cb_data;
+ struct mmc_host *host = data->card->host;
u32 status = 0;
int err;
@@ -436,22 +440,23 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
return 0;
}
- err = mmc_send_status(card, &status);
- if (retry_crc_err && err == -EILSEQ) {
+ err = mmc_send_status(data->card, &status);
+ if (data->retry_crc_err && err == -EILSEQ) {
*busy = true;
return 0;
}
if (err)
return err;
- switch (busy_cmd) {
+ switch (data->busy_cmd) {
case MMC_BUSY_CMD6:
- err = mmc_switch_status_error(card->host, status);
+ err = mmc_switch_status_error(host, status);
break;
case MMC_BUSY_ERASE:
err = R1_STATUS(status) ? -EIO : 0;
break;
case MMC_BUSY_HPI:
+ case MMC_BUSY_EXTR_SINGLE:
break;
default:
err = -EINVAL;
@@ -464,9 +469,9 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
return 0;
}
-static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- bool send_status, bool retry_crc_err,
- enum mmc_busy_cmd busy_cmd)
+int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data)
{
struct mmc_host *host = card->host;
int err;
@@ -475,16 +480,6 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
- /*
- * In cases when not allowed to poll by using CMD13 or because we aren't
- * capable of polling by using ->card_busy(), then rely on waiting the
- * stated timeout to be sufficient.
- */
- if (!send_status && !host->ops->card_busy) {
- mmc_delay(timeout_ms);
- return 0;
- }
-
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
/*
@@ -493,7 +488,7 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
*/
expired = time_after(jiffies, timeout);
- err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+ err = (*busy_cb)(cb_data, &busy);
if (err)
return err;
@@ -516,9 +511,36 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- enum mmc_busy_cmd busy_cmd)
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
+{
+ struct mmc_busy_data cb_data;
+
+ cb_data.card = card;
+ cb_data.retry_crc_err = retry_crc_err;
+ cb_data.busy_cmd = busy_cmd;
+
+ return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
+}
+
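
The hunk above turns the busy-wait loop into a generic poll that asks a caller-supplied callback whether the card is still busy. A minimal standalone sketch of the same callback pattern (stub types and a fake busy source, not the kernel API) might look like this:

#include <stdbool.h>
#include <stdio.h>

struct busy_ctx {
	int polls_left;			/* fake "card": busy until this reaches zero */
};

/* Callback contract borrowed from the patch: fill in *busy, return 0 or an error. */
static int fake_busy_cb(void *cb_data, bool *busy)
{
	struct busy_ctx *ctx = cb_data;

	*busy = ctx->polls_left-- > 0;
	return 0;
}

/* Generic poll loop in the spirit of __mmc_poll_for_busy(). */
static int poll_for_busy(int max_polls, int (*busy_cb)(void *cb_data, bool *busy),
			 void *cb_data)
{
	bool busy = true;

	while (max_polls--) {
		int err = busy_cb(cb_data, &busy);

		if (err)
			return err;
		if (!busy)
			return 0;
	}
	return -1;			/* "timed out" */
}

int main(void)
{
	struct busy_ctx ctx = { .polls_left = 3 };

	printf("poll result: %d\n", poll_for_busy(10, fake_busy_cb, &ctx));
	return 0;
}
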
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms)
{
- return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+ /*
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to an R1 response
+ * instead of an R1B. Note, some hosts require R1B, which also means
+ * they are on their own when it comes to dealing with the busy timeout.
+ */
+ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout)) {
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ return false;
+ }
+
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ cmd->busy_timeout = timeout_ms;
+ return true;
}
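
mmc_prepare_busy_cmd() centralises the choice between an R1B response with HW busy detection and a plain R1 response with SW polling. The sketch below replays that decision with illustrative flag values (the real MMC_RSP_*/MMC_CMD_* definitions live in the kernel headers):

#include <stdbool.h>
#include <stdio.h>

#define RSP_R1	0x1		/* illustrative values only */
#define RSP_R1B	0x2

struct fake_cmd {
	unsigned int flags;
	unsigned int busy_timeout;
};

/*
 * Same decision as mmc_prepare_busy_cmd(): fall back to R1 (no HW busy
 * detection) when the host's busy-timeout window is too small for the
 * needed timeout, unless the host insists on getting an R1B.
 */
static bool prepare_busy_cmd(unsigned int max_busy_timeout, bool need_rsp_busy,
			     struct fake_cmd *cmd, unsigned int timeout_ms)
{
	if (!need_rsp_busy && max_busy_timeout && timeout_ms > max_busy_timeout) {
		cmd->flags = RSP_R1;
		return false;
	}

	cmd->flags = RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}

int main(void)
{
	struct fake_cmd cmd = { 0 };

	/* Needs 2000 ms but the host can only track 1000 ms -> SW polling. */
	printf("use_r1b=%d\n", prepare_busy_cmd(1000, false, &cmd, 2000));
	return 0;
}
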
/**
@@ -543,7 +565,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {};
- bool use_r1b_resp = true;
+ bool use_r1b_resp;
unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
@@ -554,29 +576,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
timeout_ms = card->ext_csd.generic_cmd6_time;
}
- /*
- * If the max_busy_timeout of the host is specified, make sure it's
- * enough to fit the used timeout_ms. In case it's not, let's instruct
- * the host to avoid HW busy detection, by converting to a R1 response
- * instead of a R1B. Note, some hosts requires R1B, which also means
- * they are on their own when it comes to deal with the busy timeout.
- */
- if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) |
(value << 8) |
set;
- cmd.flags = MMC_CMD_AC;
- if (use_r1b_resp) {
- cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, retries);
if (err)
@@ -587,9 +592,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_host_is_spi(host))
goto out_tim;
+ /*
+ * If the host doesn't support HW polling via the ->card_busy() ops and
+ * it's not allowed to poll by using CMD13, then we need to rely on the
+ * stated timeout being sufficient.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ goto out_tim;
+ }
+
/* Let's try to poll to find out when the command is completed. */
- err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
- MMC_BUSY_CMD6);
+ err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
if (err)
goto out;
@@ -686,7 +700,7 @@ out:
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
struct mmc_command cmd = {};
@@ -709,7 +723,7 @@ int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
return mmc_wait_for_cmd(host, &cmd, 0);
}
-EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
@@ -813,28 +827,17 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
{
unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
struct mmc_host *host = card->host;
- bool use_r1b_resp = true;
+ bool use_r1b_resp = false;
struct mmc_command cmd = {};
int err;
cmd.opcode = card->ext_csd.hpi_cmd;
cmd.arg = card->rca << 16 | 1;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- /*
- * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
- * In case it doesn't, let's instruct the host to avoid HW busy
- * detection, by using a R1 response instead of R1B.
- */
- if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
- use_r1b_resp = false;
-
- if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = busy_timeout_ms;
- } else {
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- use_r1b_resp = false;
- }
+ if (cmd.opcode == MMC_STOP_TRANSMISSION)
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
+ busy_timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err) {
@@ -848,7 +851,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
return 0;
/* Let's poll to find out when the HPI request completes. */
- return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
+ return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
/**
@@ -961,26 +964,6 @@ void mmc_run_bkops(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_run_bkops);
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
- int err = 0;
-
- if (mmc_cache_enabled(card->host)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1,
- MMC_CACHE_FLUSH_TIMEOUT_MS);
- if (err)
- pr_err("%s: cache flush error %d\n",
- mmc_hostname(card->host), err);
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 7bc1cfb0654c..41ab4f573a31 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -14,10 +14,12 @@ enum mmc_busy_cmd {
MMC_BUSY_CMD6,
MMC_BUSY_ERASE,
MMC_BUSY_HPI,
+ MMC_BUSY_EXTR_SINGLE,
};
struct mmc_host;
struct mmc_card;
+struct mmc_command;
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
@@ -25,6 +27,8 @@ int mmc_set_dsr(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_set_relative_addr(struct mmc_card *card);
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
int mmc_send_status(struct mmc_card *card, u32 *status);
@@ -35,15 +39,19 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms);
+int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- enum mmc_busy_cmd busy_cmd);
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
bool send_status, bool retry_crc_err, unsigned int retries);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms);
void mmc_run_bkops(struct mmc_card *card);
-int mmc_flush_cache(struct mmc_card *card);
int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 2c48d6504101..4646b7a03db6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -66,6 +66,14 @@ static const unsigned int sd_au_size[] = {
__res & __mask; \
})
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
+
+struct sd_busy_data {
+ struct mmc_card *card;
+ u8 *reg_buf;
+};
+
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -222,7 +230,9 @@ static int mmc_decode_scr(struct mmc_card *card)
else
card->erased_byte = 0x0;
- if (scr->sda_spec3)
+ if (scr->sda_spec4)
+ scr->cmds = UNSTUFF_BITS(resp, 32, 4);
+ else if (scr->sda_spec3)
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
@@ -847,11 +857,13 @@ try_again:
return err;
/*
- * In case CCS and S18A in the response is set, start Signal Voltage
- * Switch procedure. SPI mode doesn't support CMD11.
+ * In case the S18A bit is set in the response, let's start the signal
+ * voltage switch procedure. SPI mode doesn't support CMD11.
+ * Note that, according to the spec, the S18A bit is not valid unless
+ * the CCS bit is set as well. We deliberately deviate from the spec in
+ * this regard, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr &&
- ((*rocr & 0x41000000) == 0x41000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -994,6 +1006,380 @@ static bool mmc_sd_card_using_v18(struct mmc_card *card)
(SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
}
+static int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
+ u8 reg_data)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ /*
+ * Arguments of CMD49:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] MW - mask write mode (0 = disable).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte).
+ */
+ cmd.arg = fno << 27 | page << 18 | offset << 9;
+
+ /* The first byte in the buffer is the data to be written. */
+ reg_buf[0] = reg_data;
+
+ data.flags = MMC_DATA_WRITE;
+ data.blksz = 512;
+ data.blocks = 1;
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, reg_buf, 512);
+
+ cmd.opcode = SD_WRITE_EXTR_SINGLE;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mmc_set_data_timeout(&data, card);
+ mmc_wait_for_req(host, &mrq);
+
+ kfree(reg_buf);
+
+ /*
+ * Note that the SD card is allowed to signal busy on DAT0 for up to 1s
+ * after the CMD49, but let's leave this to be managed by the
+ * caller.
+ */
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+static int sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset, u16 len, u8 *reg_buf)
+{
+ u32 cmd_args;
+
+ /*
+ * Command arguments of CMD48:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] reserved (0).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte, 1ff = 512 bytes).
+ */
+ cmd_args = fno << 27 | page << 18 | offset << 9 | (len - 1);
+
+ return mmc_send_adtc_data(card, card->host, SD_READ_EXTR_SINGLE,
+ cmd_args, reg_buf, 512);
+}
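
Both helpers above pack the same MIO/FNO/page/offset/length layout into the CMD48/CMD49 argument. The following standalone sketch only demonstrates that bit packing as described in the comments (the fno/page/offset values in main() are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/*
 * Pack a CMD48/CMD49 argument as described in the comments above:
 * [30:27] FNO, [25:18] page, [17:9] offset, [8:0] length - 1.
 * MIO and the mask-write bit are left at 0, as in the patch.
 */
static uint32_t sd_ext_reg_arg(uint8_t fno, uint8_t page, uint16_t offset,
			       uint16_t len)
{
	return (uint32_t)fno << 27 | (uint32_t)page << 18 |
	       (uint32_t)offset << 9 | (len - 1);
}

int main(void)
{
	/* Whole 512-byte page of function 0, page 0, offset 0 (as in the CMD48 reads). */
	printf("arg=0x%08x\n", sd_ext_reg_arg(0, 0, 0, 512));
	/* One byte at offset 261 within some function/page (values made up). */
	printf("arg=0x%08x\n", sd_ext_reg_arg(2, 1, 261, 1));
	return 0;
}
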
+
+static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset)
+{
+ int err;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /* Read the extension register for power management function. */
+ err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading PM func of ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* PM revision consists of 4 bits. */
+ card->ext_power.rev = reg_buf[0] & 0xf;
+
+ /* Power Off Notification support at bit 4. */
+ if (reg_buf[1] & BIT(4))
+ card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
+
+ /* Power Sustenance support at bit 5. */
+ if (reg_buf[1] & BIT(5))
+ card->ext_power.feature_support |= SD_EXT_POWER_SUSTENANCE;
+
+ /* Power Down Mode support at bit 6. */
+ if (reg_buf[1] & BIT(6))
+ card->ext_power.feature_support |= SD_EXT_POWER_DOWN_MODE;
+
+ card->ext_power.fno = fno;
+ card->ext_power.page = page;
+ card->ext_power.offset = offset;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset)
+{
+ int err;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading PERF func of ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* PERF revision. */
+ card->ext_perf.rev = reg_buf[0];
+
+ /* FX_EVENT support at bit 0. */
+ if (reg_buf[1] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_FX_EVENT;
+
+ /* Card initiated self-maintenance support at bit 0. */
+ if (reg_buf[2] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CARD_MAINT;
+
+ /* Host initiated self-maintenance support at bit 1. */
+ if (reg_buf[2] & BIT(1))
+ card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
+
+ /* Cache support at bit 0. */
+ if (reg_buf[4] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
+
+ /* Command queue support indicated via queue depth bits (0 to 4). */
+ if (reg_buf[6] & 0x1f)
+ card->ext_perf.feature_support |= SD_EXT_PERF_CMD_QUEUE;
+
+ card->ext_perf.fno = fno;
+ card->ext_perf.page = page;
+ card->ext_perf.offset = offset;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_parse_ext_reg(struct mmc_card *card, u8 *gen_info_buf,
+ u16 *next_ext_addr)
+{
+ u8 num_regs, fno, page;
+ u16 sfc, offset, ext = *next_ext_addr;
+ u32 reg_addr;
+
+ /*
+ * Parse only one register set per extension, as that is sufficient to
+ * support the standard functions. This means another 48 bytes in the
+ * buffer must be available.
+ */
+ if (ext + 48 > 512)
+ return -EFAULT;
+
+ /* Standard Function Code */
+ memcpy(&sfc, &gen_info_buf[ext], 2);
+
+ /* Address to the next extension. */
+ memcpy(next_ext_addr, &gen_info_buf[ext + 40], 2);
+
+ /* Number of registers for this extension. */
+ num_regs = gen_info_buf[ext + 42];
+
+ /* We support only one register per extension. */
+ if (num_regs != 1)
+ return 0;
+
+ /* Extension register address. */
+ memcpy(&reg_addr, &gen_info_buf[ext + 44], 4);
+
+ /* 9 bits (0 to 8) contain the offset address. */
+ offset = reg_addr & 0x1ff;
+
+ /* 8 bits (9 to 16) contain the page number. */
+ page = (reg_addr >> 9) & 0xff;
+
+ /* 4 bits (18 to 21) contain the function number. */
+ fno = (reg_addr >> 18) & 0xf;
+
+ /* Standard Function Code for power management. */
+ if (sfc == 0x1)
+ return sd_parse_ext_reg_power(card, fno, page, offset);
+
+ /* Standard Function Code for performance enhancement. */
+ if (sfc == 0x2)
+ return sd_parse_ext_reg_perf(card, fno, page, offset);
+
+ return 0;
+}
+
+static int sd_read_ext_regs(struct mmc_card *card)
+{
+ int err, i;
+ u8 num_ext, *gen_info_buf;
+ u16 rev, len, next_ext_addr;
+
+ if (mmc_host_is_spi(card->host))
+ return 0;
+
+ if (!(card->scr.cmds & SD_SCR_CMD48_SUPPORT))
+ return 0;
+
+ gen_info_buf = kzalloc(512, GFP_KERNEL);
+ if (!gen_info_buf)
+ return -ENOMEM;
+
+ /*
+ * Read 512 bytes of general info, which is found at function number 0,
+ * at page 0 and with no offset.
+ */
+ err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
+ if (err) {
+ pr_warn("%s: error %d reading general info of SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* General info structure revision. */
+ memcpy(&rev, &gen_info_buf[0], 2);
+
+ /* Length of general info in bytes. */
+ memcpy(&len, &gen_info_buf[2], 2);
+
+ /* Number of extensions to be found. */
+ num_ext = gen_info_buf[4];
+
+ /* We support revision 0, but limit it to 512 bytes for simplicity. */
+ if (rev != 0 || len > 512) {
+ pr_warn("%s: non-supported SD ext reg layout\n",
+ mmc_hostname(card->host));
+ goto out;
+ }
+
+ /*
+ * Parse the extension registers. The first extension should start
+ * immediately after the general info header (16 bytes).
+ */
+ next_ext_addr = 16;
+ for (i = 0; i < num_ext; i++) {
+ err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
+ if (err) {
+ pr_warn("%s: error %d parsing SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+ }
+
+out:
+ kfree(gen_info_buf);
+ return err;
+}
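
For reference, the general-information page walked by sd_read_ext_regs() has a 16-byte header (revision, length, number of extensions) followed by extension records carrying a standard function code, the address of the next record and a register address split into fno/page/offset. Below is a hedged, standalone decode of a fake page, using the same field offsets and the same little-endian memcpy approach as the code above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode the header and the first extension record of a 512-byte page,
 * using the field offsets from sd_read_ext_regs()/sd_parse_ext_reg(). */
static void dump_gen_info(const uint8_t *page)
{
	uint16_t rev, len, ext = 16;	/* first record follows the 16-byte header */
	uint16_t sfc, next;
	uint32_t reg_addr;

	memcpy(&rev, &page[0], 2);	/* structure revision */
	memcpy(&len, &page[2], 2);	/* length of general info in bytes */
	printf("rev=%u len=%u num_ext=%u\n", rev, len, page[4]);

	memcpy(&sfc, &page[ext], 2);		/* standard function code */
	memcpy(&next, &page[ext + 40], 2);	/* address of next extension */
	memcpy(&reg_addr, &page[ext + 44], 4);	/* first register address */
	printf("sfc=%u next=%u num_regs=%u offset=%u page=%u fno=%u\n",
	       sfc, next, page[ext + 42],
	       reg_addr & 0x1ff, (reg_addr >> 9) & 0xff, (reg_addr >> 18) & 0xf);
}

int main(void)
{
	uint8_t page[512] = { 0 };

	page[4] = 1;			/* pretend one extension is present */
	page[16] = 0x01;		/* SFC 1: power management */
	page[16 + 42] = 1;		/* one register set */
	dump_gen_info(page);
	return 0;
}
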
+
+static bool sd_cache_enabled(struct mmc_host *host)
+{
+ return host->card->ext_perf.feature_enabled & SD_EXT_PERF_CACHE;
+}
+
+static int sd_flush_cache(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u8 *reg_buf, fno, page;
+ u16 offset;
+ int err;
+
+ if (!sd_cache_enabled(host))
+ return 0;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set Flush Cache at bit 0 in the performance enhancement register at
+ * a 261-byte offset.
+ */
+ fno = card->ext_perf.fno;
+ page = card->ext_perf.page;
+ offset = card->ext_perf.offset + 261;
+
+ err = sd_write_ext_reg(card, fno, page, offset, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Cache Flush bit\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
+ /*
+ * Read the Flush Cache bit. The card shall reset it, to confirm that
+ * it has completed the flushing of the cache.
+ */
+ err = sd_read_ext_reg(card, fno, page, offset, 1, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading Cache Flush bit\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ if (reg_buf[0] & BIT(0))
+ err = -ETIMEDOUT;
+out:
+ kfree(reg_buf);
+ return err;
+}
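
sd_flush_cache() is a three-step handshake: set the Flush Cache bit, wait for busy to clear, then read the bit back and treat a still-set bit as a timeout. Below is a compact sketch of that protocol with the card accesses stubbed out (hypothetical callbacks, not the kernel helpers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical card accessors; the real code goes through CMD48/CMD49. */
struct ext_reg_ops {
	int (*write_byte)(uint16_t offset, uint8_t val);
	int (*read_byte)(uint16_t offset, uint8_t *val);
	int (*wait_not_busy)(unsigned int timeout_ms);
};

/* Same three steps as sd_flush_cache(): set bit 0 at offset + 261, wait for
 * busy to clear, then read the bit back; still-set means timeout. */
static int flush_cache(const struct ext_reg_ops *ops, uint16_t perf_offset)
{
	uint8_t val;
	int err;

	err = ops->write_byte(perf_offset + 261, 0x01);
	if (err)
		return err;

	err = ops->wait_not_busy(1000);
	if (err)
		return err;

	err = ops->read_byte(perf_offset + 261, &val);
	if (err)
		return err;

	return (val & 0x01) ? -1 : 0;	/* bit still set: flush did not finish */
}

/* Fake card: the flush bit self-clears as soon as it is written. */
static uint8_t fake_reg;
static int fake_write(uint16_t offset, uint8_t val) { (void)offset; (void)val; fake_reg = 0; return 0; }
static int fake_read(uint16_t offset, uint8_t *val) { (void)offset; *val = fake_reg; return 0; }
static int fake_wait(unsigned int timeout_ms) { (void)timeout_ms; return 0; }

int main(void)
{
	struct ext_reg_ops ops = { fake_write, fake_read, fake_wait };

	printf("flush result: %d\n", flush_cache(&ops, 0));
	return 0;
}
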
+
+static int sd_enable_cache(struct mmc_card *card)
+{
+ u8 *reg_buf;
+ int err;
+
+ card->ext_perf.feature_enabled &= ~SD_EXT_PERF_CACHE;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set Cache Enable at bit 0 in the performance enhancement register at
+ * a 260-byte offset.
+ */
+ err = sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
+ card->ext_perf.offset + 260, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Cache Enable bit\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (!err)
+ card->ext_perf.feature_enabled |= SD_EXT_PERF_CACHE;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -1142,6 +1528,20 @@ retry:
}
}
+ if (!oldcard) {
+ /* Read/parse the extension registers. */
+ err = sd_read_ext_regs(card);
+ if (err)
+ goto free_card;
+ }
+
+ /* Enable internal SD cache if supported. */
+ if (card->ext_perf.feature_support & SD_EXT_PERF_CACHE) {
+ err = sd_enable_cache(card);
+ if (err)
+ goto free_card;
+ }
+
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
@@ -1213,21 +1613,84 @@ static void mmc_sd_detect(struct mmc_host *host)
}
}
+static int sd_can_poweroff_notify(struct mmc_card *card)
+{
+ return card->ext_power.feature_support & SD_EXT_POWER_OFF_NOTIFY;
+}
+
+static int sd_busy_poweroff_notify_cb(void *cb_data, bool *busy)
+{
+ struct sd_busy_data *data = cb_data;
+ struct mmc_card *card = data->card;
+ int err;
+
+ /*
+ * Read the status register for the power management function. It's at
+ * one byte offset and is one byte long. The Power Off Notification
+ * Ready is bit 0.
+ */
+ err = sd_read_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ card->ext_power.offset + 1, 1, data->reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading status reg of PM func\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+
+ *busy = !(data->reg_buf[0] & BIT(0));
+ return 0;
+}
+
+static int sd_poweroff_notify(struct mmc_card *card)
+{
+ struct sd_busy_data cb_data;
+ u8 *reg_buf;
+ int err;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set the Power Off Notification bit in the power management settings
+ * register at a 2-byte offset.
+ */
+ err = sd_write_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ card->ext_power.offset + 2, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Power Off Notify bit\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ cb_data.card = card;
+ cb_data.reg_buf = reg_buf;
+ err = __mmc_poll_for_busy(card, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ &sd_busy_poweroff_notify_cb, &cb_data);
+
+out:
+ kfree(reg_buf);
+ return err;
+}
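
Note that the poll callback above inverts the register semantics: the card counts as busy for as long as the Power Off Notification Ready bit (bit 0) is still zero. A two-step illustration of that mapping (hypothetical register values, no card access):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned char status_reg = 0x00;	/* bit 0 not yet set by the card */
	bool busy = !(status_reg & 0x01);	/* same test as the callback */

	printf("busy=%d\n", busy);		/* 1: keep polling */

	status_reg = 0x01;			/* Power Off Notification Ready */
	busy = !(status_reg & 0x01);
	printf("busy=%d\n", busy);		/* 0: done */
	return 0;
}
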
+
static int _mmc_sd_suspend(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int err = 0;
mmc_claim_host(host);
- if (mmc_card_suspended(host->card))
+ if (mmc_card_suspended(card))
goto out;
- if (!mmc_host_is_spi(host))
+ if (sd_can_poweroff_notify(card))
+ err = sd_poweroff_notify(card);
+ else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
if (!err) {
mmc_power_off(host);
- mmc_card_set_suspended(host->card);
+ mmc_card_set_suspended(card);
}
out:
@@ -1331,6 +1794,8 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.alive = mmc_sd_alive,
.shutdown = mmc_sd_suspend,
.hw_reset = mmc_sd_hw_reset,
+ .cache_enabled = sd_cache_enabled,
+ .flush_cache = sd_flush_cache,
};
/*
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index d61ff811218c..ef8d1dce5af1 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -17,6 +17,7 @@
#include "core.h"
#include "sd_ops.h"
+#include "mmc_ops.h"
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
@@ -309,43 +310,18 @@ int mmc_app_send_scr(struct mmc_card *card)
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
- struct mmc_request mrq = {};
- struct mmc_command cmd = {};
- struct mmc_data data = {};
- struct scatterlist sg;
+ u32 cmd_args;
/* NOTE: caller guarantees resp is heap-allocated */
mode = !!mode;
value &= 0xF;
+ cmd_args = mode << 31 | 0x00FFFFFF;
+ cmd_args &= ~(0xF << (group * 4));
+ cmd_args |= value << (group * 4);
- mrq.cmd = &cmd;
- mrq.data = &data;
-
- cmd.opcode = SD_SWITCH;
- cmd.arg = mode << 31 | 0x00FFFFFF;
- cmd.arg &= ~(0xF << (group * 4));
- cmd.arg |= value << (group * 4);
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
- data.blksz = 64;
- data.blocks = 1;
- data.flags = MMC_DATA_READ;
- data.sg = &sg;
- data.sg_len = 1;
-
- sg_init_one(&sg, resp, 64);
-
- mmc_set_data_timeout(&data, card);
-
- mmc_wait_for_req(card->host, &mrq);
-
- if (cmd.error)
- return cmd.error;
- if (data.error)
- return data.error;
-
- return 0;
+ return mmc_send_adtc_data(card, card->host, SD_SWITCH, cmd_args, resp,
+ 64);
}
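
The rewrite keeps only the CMD6 argument construction in mmc_sd_switch(): start from all-ones function groups, clear the nibble of the group being changed and insert the new value. A worked, standalone example of that packing (group 0 / value 1 is the well-known high-speed switch, giving 0x80fffff1):

#include <stdint.h>
#include <stdio.h>

/* CMD6 argument as built in mmc_sd_switch(): mode in bit 31, one 4-bit
 * nibble per function group, 0xF meaning "no change". */
static uint32_t sd_switch_arg(int mode, int group, uint8_t value)
{
	uint32_t arg = (uint32_t)(!!mode) << 31 | 0x00FFFFFF;

	arg &= ~(0xFu << (group * 4));
	arg |= (uint32_t)(value & 0xF) << (group * 4);
	return arg;
}

int main(void)
{
	/* Set (mode 1) group 0 to value 1: the classic high-speed switch. */
	printf("0x%08x\n", sd_switch_arg(1, 0, 1));	/* expect 0x80fffff1 */
	return 0;
}
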
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 3eb94ac2712e..68edf7a615be 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -937,11 +937,9 @@ static void mmc_sdio_detect(struct mmc_host *host)
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
- err = pm_runtime_get_sync(&host->card->dev);
- if (err < 0) {
- pm_runtime_put_noidle(&host->card->dev);
+ err = pm_runtime_resume_and_get(&host->card->dev);
+ if (err < 0)
goto out;
- }
}
mmc_claim_host(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index a4d4c757eea0..561184fa7eb9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -412,7 +412,7 @@ config MMC_SDHCI_MILBEAUT
config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
- depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
+ depends on ARCH_BCM2835 || ARCH_BCM_IPROC || ARCH_BRCMSTB || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
depends on OF || ACPI
default ARCH_BCM_IPROC
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 93b0432bb601..38559a956330 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -45,17 +45,23 @@ static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
return desc + cq_host->task_desc_len;
}
+static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
+}
+
static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
- return cq_host->trans_desc_dma_base +
- (cq_host->mmc->max_segs * tag *
- cq_host->trans_desc_len);
+ size_t offset = get_trans_desc_offset(cq_host, tag);
+
+ return cq_host->trans_desc_dma_base + offset;
}
static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
- return cq_host->trans_desc_base +
- (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+ size_t offset = get_trans_desc_offset(cq_host, tag);
+
+ return cq_host->trans_desc_base + offset;
}
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
@@ -146,7 +152,7 @@ static void cqhci_dumpregs(struct cqhci_host *cq_host)
}
/*
- * The allocated descriptor table for task, link & transfer descritors
+ * The allocated descriptor table for task, link & transfer descriptors
* looks like:
* |----------|
* |task desc | |->|----------|
@@ -194,8 +200,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
- cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- cq_host->mmc->cqe_qdepth;
+ cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 73731cd3ba23..9901208be797 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -17,7 +17,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/clk.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3c636edbb46..0db17bcc9c16 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -674,7 +674,7 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
cmdat |= JZ_MMC_CMDAT_WRITE;
if (host->use_dma) {
/*
- * The 4780's MMC controller has integrated DMA ability
+ * The JZ4780's MMC controller has integrated DMA ability
* in addition to being able to use the external DMA
* controller. It moves DMA control bits to a separate
* register. The DMA_SEL bit chooses the external
@@ -866,7 +866,7 @@ static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
writew(div, host->base + JZ_REG_MMC_CLKRT);
if (real_rate > 25000000) {
- if (host->version >= JZ_MMC_X1000) {
+ if (host->version >= JZ_MMC_JZ4780) {
writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
JZ_MMC_LPM_LOW_POWER_MODE_EN,
@@ -959,6 +959,7 @@ static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
+ { .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
{},
@@ -1013,7 +1014,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
- dev_err(&pdev->dev, "Failed to ioremap base memory\n");
goto err_free_host;
}
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b8b771b643cc..3f28eb4d17fe 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -165,6 +165,7 @@ struct meson_host {
unsigned int bounce_buf_size;
void *bounce_buf;
+ void __iomem *bounce_iomem_buf;
dma_addr_t bounce_dma_addr;
struct sd_emmc_desc *descs;
dma_addr_t descs_dma_addr;
@@ -236,7 +237,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
if (host->dram_access_quirk)
return;
- if (data->blocks > 1) {
+ /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
+ if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
/*
* In block mode DMA descriptor format, "length" field indicates
* number of blocks and there is no way to pass DMA size that
@@ -258,7 +260,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
for_each_sg(data->sg, sg, data->sg_len, i) {
/* check for 8 byte alignment */
if (sg->offset % 8) {
- WARN_ONCE(1, "unaligned scatterlist buffer\n");
+ dev_warn_once(mmc_dev(mmc),
+ "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
+ sg->offset);
return;
}
}
@@ -742,6 +746,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
writel(start, host->regs + SD_EMMC_START);
}
+/* Local sg copy to/from the bounce buffer, using memcpy_toio/fromio when dram_access_quirk is set */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+ size_t buflen, bool to_buffer)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct scatterlist *sgl = data->sg;
+ unsigned int nents = data->sg_len;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_buffer)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ /* With dram_access_quirk, the bounce buffer is an iomem mapping */
+ if (host->dram_access_quirk) {
+ if (to_buffer)
+ memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+ else
+ memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ } else {
+ if (to_buffer)
+ memcpy(host->bounce_buf + offset, miter.addr, len);
+ else
+ memcpy(miter.addr, host->bounce_buf + offset, len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+}
+
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct meson_host *host = mmc_priv(mmc);
@@ -785,8 +830,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_to_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
dma_wmb();
}
@@ -955,8 +999,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_from_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
}
next_cmd = meson_mmc_get_next_command(cmd);
@@ -1176,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
* instead of the DDR memory
*/
host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
- host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+ host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
} else {
/* data bounce buffer */
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 9776a03a10f5..65c65bb5737f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -504,7 +504,7 @@ mmc_spi_command_send(struct mmc_spi_host *host,
/* else: R1 (most commands) */
}
- dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
+ dev_dbg(&host->spi->dev, " CMD%d, resp %s\n",
cmd->opcode, maptype(cmd));
/* send command, leaving chipselect active */
@@ -928,8 +928,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
while (length) {
t->len = min(length, blk_size);
- dev_dbg(&host->spi->dev,
- " mmc_spi: %s block, %d bytes\n",
+ dev_dbg(&host->spi->dev, " %s block, %d bytes\n",
(direction == DMA_TO_DEVICE) ? "write" : "read",
t->len);
@@ -974,7 +973,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
int tmp;
const unsigned statlen = sizeof(scratch->status);
- dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");
+ dev_dbg(&spi->dev, " STOP_TRAN\n");
/* Tweak the per-block message we set up earlier by morphing
* it to hold single buffer with the token followed by some
@@ -1175,7 +1174,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
canpower = host->pdata && host->pdata->setpower;
- dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
+ dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
mmc_powerstring(ios->power_mode),
ios->vdd,
canpower ? ", can switch" : "");
@@ -1248,8 +1247,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->spi->max_speed_hz = ios->clock;
status = spi_setup(host->spi);
- dev_dbg(&host->spi->dev,
- "mmc_spi: clock to %d Hz, %d\n",
+ dev_dbg(&host->spi->dev, " clock to %d Hz, %d\n",
host->spi->max_speed_hz, status);
}
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 898ed1b023df..4dfc246c5f95 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -724,10 +724,8 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
}
-static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
+static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
{
- struct mmc_data *data = mrq->data;
-
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
data->host_cookie |= MSDC_PREPARE_FLAG;
data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
@@ -735,10 +733,8 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
}
}
-static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
+static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
{
- struct mmc_data *data = mrq->data;
-
if (data->host_cookie & MSDC_ASYNC_FLAG)
return;
@@ -1140,7 +1136,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
msdc_track_cmd_data(host, mrq->cmd, mrq->data);
if (mrq->data)
- msdc_unprepare_data(host, mrq);
+ msdc_unprepare_data(host, mrq->data);
if (host->error)
msdc_reset_hw(host);
mmc_request_done(mmc_from_priv(host), mrq);
@@ -1311,7 +1307,7 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
if (mrq->data)
- msdc_prepare_data(host, mrq);
+ msdc_prepare_data(host, mrq->data);
/* if SBC is required, we have HW option and SW option.
* if HW option is enabled, and SBC does not have "special" flags,
@@ -1332,7 +1328,7 @@ static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
if (!data)
return;
- msdc_prepare_data(host, mrq);
+ msdc_prepare_data(host, data);
data->host_cookie |= MSDC_ASYNC_FLAG;
}
@@ -1340,19 +1336,18 @@ static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
int err)
{
struct msdc_host *host = mmc_priv(mmc);
- struct mmc_data *data;
+ struct mmc_data *data = mrq->data;
- data = mrq->data;
if (!data)
return;
+
if (data->host_cookie) {
data->host_cookie &= ~MSDC_ASYNC_FLAG;
- msdc_unprepare_data(host, mrq);
+ msdc_unprepare_data(host, data);
}
}
-static void msdc_data_xfer_next(struct msdc_host *host,
- struct mmc_request *mrq, struct mmc_data *data)
+static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
{
if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
!mrq->sbc)
@@ -1411,7 +1406,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
(int)data->error, data->bytes_xfered);
}
- msdc_data_xfer_next(host, mrq, data);
+ msdc_data_xfer_next(host, mrq);
done = true;
}
return done;
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 9d480a05f655..3629550528b6 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -22,8 +22,8 @@
MODULE_LICENSE("GPL");
struct of_mmc_spi {
- int detect_irq;
struct mmc_spi_platform_data pdata;
+ int detect_irq;
};
static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 635bf31a6735..e49ca0f7fe9a 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -692,14 +692,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
+ int cmd_error;
+
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
- if (mmc_send_tuning(mmc, opcode, NULL) == 0)
+ if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
set_bit(i, priv->taps);
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
set_bit(i, priv->smpcmp);
+
+ if (cmd_error)
+ mmc_send_abort_tuning(mmc, opcode);
}
ret = renesas_sdhi_select_tuning(host);
@@ -939,7 +944,7 @@ static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
- { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+ { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
{ .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 0ca6f6d30b75..8d5929a32d34 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1578,17 +1578,12 @@ static int s3cmci_probe(struct platform_device *pdev)
goto probe_iounmap;
}
- if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
+ if (request_irq(host->irq, s3cmci_irq, IRQF_NO_AUTOEN, DRIVER_NAME, host)) {
dev_err(&pdev->dev, "failed to request mci interrupt.\n");
ret = -ENOENT;
goto probe_iounmap;
}
- /* We get spurious interrupts even when we have set the IMSK
- * register to ignore everything, so use disable_irq() to make
- * ensure we don't lock the system with un-serviceable requests. */
-
- disable_irq(host->irq);
host->irq_state = false;
/* Depending on the dma state, get a DMA channel to use. */
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c3fbf8c825c4..8fe65f172a61 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -822,6 +822,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
+ {
+ /*
+ * The Toshiba WT8-B's microSD slot always reports the card as being
+ * write-protected.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
{} /* Terminating entry */
};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index b991cf0e60c5..72c0bf0c1887 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -324,11 +324,6 @@ static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
return data->socdata == &esdhc_imx53_data;
}
-static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
-{
- return data->socdata == &usdhc_imx6q_data;
-}
-
static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
{
return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
@@ -427,9 +422,6 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
| FIELD_PREP(SDHCI_RETUNING_MODE_MASK,
SDHCI_TUNING_MODE_3);
- if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
- val |= SDHCI_SUPPORT_HS400;
-
/*
* Do not advertise faster UHS modes if there are no
* pinctrl states for 100MHz/200MHz.
@@ -1591,7 +1583,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
- host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
+ host->mmc->caps2 |= MMC_CAP2_HS400;
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
@@ -1628,6 +1620,14 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
+ /*
+ * Set up the wakeup capability here and let the user decide whether to
+ * enable this wakeup through the sysfs interface.
+ */
+ if ((host->mmc->pm_caps & MMC_PM_KEEP_POWER) &&
+ (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ))
+ device_set_wakeup_capable(&pdev->dev, true);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index ddeaf8e1f72f..cce390fe9cf3 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -286,11 +286,35 @@ static const struct sdhci_iproc_data bcm2711_data = {
.mmc_caps = MMC_CAP_3_3V_DDR,
};
+static const struct sdhci_pltfm_data sdhci_bcm7211a0_pltfm_data = {
+ .quirks = SDHCI_QUIRK_MISSING_CAPS |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_BROKEN_DMA |
+ SDHCI_QUIRK_BROKEN_ADMA,
+ .ops = &sdhci_iproc_ops,
+};
+
+#define BCM7211A0_BASE_CLK_MHZ 100
+static const struct sdhci_iproc_data bcm7211a0_data = {
+ .pdata = &sdhci_bcm7211a0_pltfm_data,
+ .caps = ((BCM7211A0_BASE_CLK_MHZ / 2) << SDHCI_TIMEOUT_CLK_SHIFT) |
+ (BCM7211A0_BASE_CLK_MHZ << SDHCI_CLOCK_BASE_SHIFT) |
+ ((0x2 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_VDD_180 |
+ SDHCI_CAN_DO_SUSPEND |
+ SDHCI_CAN_DO_HISPD,
+ .caps1 = SDHCI_DRIVER_TYPE_C |
+ SDHCI_DRIVER_TYPE_D,
+};
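
Since SDHCI_QUIRK_MISSING_CAPS is set, the capability word for this controller is synthesized from the 100 MHz reference clock rather than read from hardware, using the shift macros added in the sdhci.h hunk further down. A small sketch that assembles and decodes such a caps word (field widths taken from the SDHCI_*_MASK definitions; assumes the 8-bit v3 base-clock field):

#include <stdint.h>
#include <stdio.h>

#define TIMEOUT_CLK_SHIFT	0	/* mirrors SDHCI_TIMEOUT_CLK_SHIFT */
#define CLOCK_BASE_SHIFT	8	/* mirrors SDHCI_CLOCK_BASE_SHIFT */
#define MAX_BLOCK_SHIFT		16	/* mirrors SDHCI_MAX_BLOCK_SHIFT */
#define BASE_CLK_MHZ		100

int main(void)
{
	/* Assemble the clock-related part of the caps word as bcm7211a0_data does. */
	uint32_t caps = ((BASE_CLK_MHZ / 2) << TIMEOUT_CLK_SHIFT) |
			(BASE_CLK_MHZ << CLOCK_BASE_SHIFT) |
			(0x2 << MAX_BLOCK_SHIFT);

	/* Decode: 6-bit timeout clock, 8-bit (v3) base clock, 2-bit block-size code. */
	printf("timeout_clk=%u MHz, base_clk=%u MHz, max_block_code=%u\n",
	       caps & 0x3f, (caps >> CLOCK_BASE_SHIFT) & 0xff,
	       (caps >> MAX_BLOCK_SHIFT) & 0x3);
	return 0;
}
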
+
static const struct of_device_id sdhci_iproc_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
{ .compatible = "brcm,bcm2711-emmc2", .data = &bcm2711_data },
{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data},
{ .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
+ { .compatible = "brcm,bcm7211a0-sdhci", .data = &bcm7211a0_data },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
@@ -384,6 +408,11 @@ err:
return ret;
}
+static void sdhci_iproc_shutdown(struct platform_device *pdev)
+{
+ sdhci_pltfm_suspend(&pdev->dev);
+}
+
static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
@@ -394,6 +423,7 @@ static struct platform_driver sdhci_iproc_driver = {
},
.probe = sdhci_iproc_probe,
.remove = sdhci_pltfm_unregister,
+ .shutdown = sdhci_iproc_shutdown,
};
module_platform_driver(sdhci_iproc_driver);
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index d001c51074a0..6e4e132903a6 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -31,6 +31,11 @@
#define ASPEED_SDC_S0_PHASE_OUT_EN GENMASK(1, 0)
#define ASPEED_SDC_PHASE_MAX 31
+/* SDIO{10,20} */
+#define ASPEED_SDC_CAP1_1_8V (0 * 32 + 26)
+/* SDIO{14,24} */
+#define ASPEED_SDC_CAP2_SDR104 (1 * 32 + 1)
+
struct aspeed_sdc {
struct clk *clk;
struct resource *res;
@@ -72,6 +77,37 @@ struct aspeed_sdhci {
const struct aspeed_sdhci_phase_desc *phase_desc;
};
+/*
+ * This function sets the mirror register for updating the
+ * capabilities of the current slot.
+ *
+ * slot | capability | caps_reg | mirror_reg
+ * -----|-------------|----------|------------
+ * 0 | CAP1_1_8V | SDIO140 | SDIO10
+ * 0 | CAP2_SDR104 | SDIO144 | SDIO14
+ * 1 | CAP1_1_8V | SDIO240 | SDIO20
+ * 1 | CAP2_SDR104 | SDIO244 | SDIO24
+ */
+static void aspeed_sdc_set_slot_capability(struct sdhci_host *host, struct aspeed_sdc *sdc,
+ int capability, bool enable, u8 slot)
+{
+ u32 mirror_reg_offset;
+ u32 cap_val;
+ u8 cap_reg;
+
+ if (slot > 1)
+ return;
+
+ cap_reg = capability / 32;
+ cap_val = sdhci_readl(host, 0x40 + (cap_reg * 4));
+ if (enable)
+ cap_val |= BIT(capability % 32);
+ else
+ cap_val &= ~BIT(capability % 32);
+ mirror_reg_offset = ((slot + 1) * 0x10) + (cap_reg * 4);
+ writel(cap_val, sdc->regs + mirror_reg_offset);
+}
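
The mapping in the table above reduces to two small computations: the capability word index (capability / 32) and the per-slot mirror offset ((slot + 1) * 0x10 + word * 4). The standalone check below reproduces the four rows of the table (pure arithmetic, no MMIO):

#include <stdio.h>

#define CAP1_1_8V	(0 * 32 + 26)	/* mirrors ASPEED_SDC_CAP1_1_8V */
#define CAP2_SDR104	(1 * 32 + 1)	/* mirrors ASPEED_SDC_CAP2_SDR104 */

static void show(int capability, int slot)
{
	int cap_reg = capability / 32;

	printf("slot %d cap word %d: caps reg 0x%02x, mirror reg 0x%02x, bit %d\n",
	       slot, cap_reg, 0x40 + cap_reg * 4,
	       (slot + 1) * 0x10 + cap_reg * 4, capability % 32);
}

int main(void)
{
	show(CAP1_1_8V, 0);	/* SDIO140 -> SDIO10 */
	show(CAP2_SDR104, 0);	/* SDIO144 -> SDIO14 */
	show(CAP1_1_8V, 1);	/* SDIO240 -> SDIO20 */
	show(CAP2_SDR104, 1);	/* SDIO244 -> SDIO24 */
	return 0;
}
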
+
static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
struct aspeed_sdhci *sdhci,
bool bus8)
@@ -150,7 +186,7 @@ static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz,
tap = div_u64(phase_period_ps, prop_delay_ps);
if (tap > ASPEED_SDHCI_NR_TAPS) {
- dev_warn(dev,
+ dev_dbg(dev,
"Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n",
tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS);
tap = ASPEED_SDHCI_NR_TAPS;
@@ -328,6 +364,7 @@ static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev,
static int aspeed_sdhci_probe(struct platform_device *pdev)
{
const struct aspeed_sdhci_pdata *aspeed_pdata;
+ struct device_node *np = pdev->dev.of_node;
struct sdhci_pltfm_host *pltfm_host;
struct aspeed_sdhci *dev;
struct sdhci_host *host;
@@ -372,6 +409,17 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
+ if (of_property_read_bool(np, "mmc-hs200-1_8v") ||
+ of_property_read_bool(np, "sd-uhs-sdr104")) {
+ aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP1_1_8V,
+ true, slot);
+ }
+
+ if (of_property_read_bool(np, "sd-uhs-sdr104")) {
+ aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP2_SDR104,
+ true, slot);
+ }
+
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk))
return PTR_ERR(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 7893fd3599b6..8f4d1f003f65 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1173,10 +1173,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
* as part of pm_runtime_get_sync.
*/
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
dev_err(dev, "pm_runtime_get_sync failed\n");
- pm_runtime_put_noidle(dev);
goto err_rpm_disable;
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 592d79082f58..4fd99c1e82ba 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -94,7 +94,7 @@
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
-#define GLI_9763E_CFG2_L1DLY_MID 0x50
+#define GLI_9763E_CFG2_L1DLY_MID 0x54
#define PCIE_GLI_9763E_MMC_CTRL 0x960
#define GLI_9763E_HS400_SLOW BIT(3)
@@ -627,8 +627,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
*
* Wait 5ms after set 1.8V signal enable in Host Control 2 register
* to ensure 1.8V signal enable bit is set by GL9750/GL9755.
+ *
+ * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
+ * slightly longer than 5ms before the control register reports that
+ * 1.8V is ready, and far longer still before the card will actually
+ * work reliably.
*/
- usleep_range(5000, 5500);
+ usleep_range(100000, 110000);
}
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
@@ -842,7 +847,7 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG2, &value);
value &= ~GLI_9763E_CFG2_L1DLY;
- /* set ASPM L1 entry delay to 20us */
+ /* set ASPM L1 entry delay to 21us */
value |= FIELD_PREP(GLI_9763E_CFG2_L1DLY, GLI_9763E_CFG2_L1DLY_MID);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG2, value);
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 5dc36efff47f..11e375579cfb 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -393,6 +393,7 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
+ .write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
.get_max_clock = sdhci_sprd_get_max_clock,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bf238ade1602..6aaf5c3ce34c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2680,7 +2680,7 @@ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
sdhci_end_tuning(host);
- mmc_abort_tuning(host->mmc, opcode);
+ mmc_send_abort_tuning(host->mmc, opcode);
}
EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0770c036e2ff..c35ed4be75b7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -201,8 +201,10 @@
#define SDHCI_CAPABILITIES 0x40
#define SDHCI_TIMEOUT_CLK_MASK GENMASK(5, 0)
+#define SDHCI_TIMEOUT_CLK_SHIFT 0
#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define SDHCI_CLOCK_BASE_MASK GENMASK(13, 8)
+#define SDHCI_CLOCK_BASE_SHIFT 8
#define SDHCI_CLOCK_V3_BASE_MASK GENMASK(15, 8)
#define SDHCI_MAX_BLOCK_MASK 0x00030000
#define SDHCI_MAX_BLOCK_SHIFT 16
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 1fad6e442688..f654afbe8e83 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -809,11 +809,9 @@ static int sdhci_am654_probe(struct platform_device *pdev)
/* Clocks are enabled using pm_runtime */
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
goto pm_runtime_disable;
- }
base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 615f3d008af1..b9b79b1089a0 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1801,6 +1801,7 @@ static int usdhi6_probe(struct platform_device *pdev)
version = usdhi6_read(host, USDHI6_VERSION);
if ((version & 0xfff) != 0xa0d) {
+ ret = -EPERM;
dev_err(dev, "Version not recognized %x\n", version);
goto e_clk_off;
}
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a1d098560099..c32df5530b94 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -857,6 +857,9 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
{
BUG_ON(intmask == 0);
+ if (!host->data)
+ return;
+
if (intmask & VIA_CRDR_SDSTS_DT)
host->data->error = -ETIMEDOUT;
else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 739cf63ef6e2..4950d10d3a19 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2279,7 +2279,7 @@ static int vub300_probe(struct usb_interface *interface,
if (retval < 0)
goto error5;
retval =
- usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 6edf78c16fc8..df40927e5678 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/iopoll.h>
@@ -240,6 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
return 0;
}
+static int cs553x_ecc_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
static struct cs553x_nand_controller *controllers[4];
static int cs553x_attach_chip(struct nand_chip *chip)
@@ -251,7 +261,7 @@ static int cs553x_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 3;
chip->ecc.hwctl = cs_enable_hwecc;
chip->ecc.calculate = cs_calculate_ecc;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = cs553x_ecc_correct;
chip->ecc.strength = 1;
return 0;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index bf695255b43a..a3e66155ae40 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -25,6 +25,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -432,6 +433,15 @@ static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
return 0;
}
+static int fsmc_correct_ecc1(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
/* Count the number of 0's in buff upto a max of max_bits */
static int count_written_bits(u8 *buff, int size, int max_bits)
{
@@ -917,7 +927,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
case NAND_ECC_ENGINE_TYPE_ON_HOST:
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = rawnand_sw_hamming_correct;
+ nand->ecc.correct = fsmc_correct_ecc1;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 6b7269cfb7d8..d7dfc6fd85ca 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#define LPC32XX_MODNAME "lpc32xx-nand"
@@ -345,6 +346,18 @@ static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
}
/*
+ * Corrects the data
+ */
+static int lpc32xx_nand_ecc_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
+/*
* Read a single byte from NAND device
*/
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
@@ -802,7 +815,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = lpc32xx_nand_ecc_correct;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 338d6b1a189e..98d5a94c3a24 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -22,6 +22,7 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
@@ -100,6 +101,15 @@ static int ndfc_calculate_ecc(struct nand_chip *chip,
return 0;
}
+static int ndfc_correct_ecc(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
/*
* Speedups for buffer read/write/verify
*
@@ -145,7 +155,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
chip->legacy.write_buf = ndfc_write_buf;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = ndfc_correct_ecc;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 5612ee628425..2f1fe464e663 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sharpsl.h>
@@ -96,6 +97,15 @@ static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
return readb(sharpsl->io + ECCCNTR) != 0;
}
+static int sharpsl_nand_correct_ecc(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
static int sharpsl_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
@@ -106,7 +116,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip)
chip->ecc.strength = 1;
chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
chip->ecc.calculate = sharpsl_nand_calculate_ecc;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = sharpsl_nand_correct_ecc;
return 0;
}
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index de8e919d0ebe..6d93dd31969b 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
@@ -292,11 +293,12 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
int r0, r1;
/* assume ecc.size = 512 and ecc.bytes = 6 */
- r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
+ r0 = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
if (r0 < 0)
return r0;
- r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
- calc_ecc + 3);
+ r1 = ecc_sw_hamming_correct(buf + 256, read_ecc + 3, calc_ecc + 3,
+ chip->ecc.size, false);
if (r1 < 0)
return r1;
return r0 + r1;
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 1a9449e53bf9..b8894ac27073 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
@@ -193,8 +194,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
int stat;
for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
- stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
- calc_ecc);
+ stat = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
if (stat < 0)
return stat;
corrected += stat;
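
The raw NAND changes above all follow one pattern: each driver gains a trivial ->ecc.correct() wrapper that calls ecc_sw_hamming_correct() with sm_order forced to false instead of going through rawnand_sw_hamming_correct(), apparently so the correction no longer depends on a software ECC engine configuration these controllers never set up. A minimal, illustrative form of that wrapper (driver name is a placeholder):

#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>

/* Illustrative only: the shape of the wrapper each driver above adds.
 * sm_order = false keeps the ECC bytes in the order the controller
 * computed them rather than in Smart Media order.
 */
static int example_ecc_correct(struct nand_chip *chip, unsigned char *buf,
			       unsigned char *read_ecc,
			       unsigned char *calc_ecc)
{
	return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
				      chip->ecc.size, false);
}
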
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 17f63f95f4a2..3131fae0c715 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -473,20 +473,26 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand, u8 *s)
+static int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
{
- unsigned long timeo = jiffies + msecs_to_jiffies(400);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
- do {
- ret = spinand_read_status(spinand, &status);
- if (ret)
- return ret;
+ ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
+ initial_delay_us,
+ poll_delay_us,
+ SPINAND_WAITRDY_TIMEOUT_MS);
+ if (ret)
+ return ret;
- if (!(status & STATUS_BUSY))
- goto out;
- } while (time_before(jiffies, timeo));
+ status = *spinand->scratchbuf;
+ if (!(status & STATUS_BUSY))
+ goto out;
/*
* Extra read, just in case the STATUS_READY bit has changed
@@ -526,7 +532,10 @@ static int spinand_reset_op(struct spinand_device *spinand)
if (ret)
return ret;
- return spinand_wait(spinand, NULL);
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -549,7 +558,10 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
if (ret < 0)
return ret;
@@ -585,7 +597,10 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
if (!ret && (status & STATUS_PROG_FAILED))
return -EIO;
@@ -768,7 +783,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
+
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
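
In the SPI-NAND core, the open-coded busy-wait on jiffies is replaced by spi_mem_poll_status(), with initial and polling delays chosen per operation (reset, read, write, erase) so controllers that can poll the status register in hardware are able to offload the wait. A condensed sketch of the resulting wait path, using the same call shape as the hunk above (error handling and the extra status re-read are omitted):

/* Condensed sketch of the reworked spinand_wait(); not the full function. */
static int example_spinand_wait(struct spinand_device *spinand,
				unsigned long initial_delay_us,
				unsigned long poll_delay_us, u8 *s)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	int ret;

	/* Block until STATUS_BUSY reads back as 0 or the timeout expires. */
	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us, poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	if (s)
		*s = *spinand->scratchbuf;	/* last status value read */
	return 0;
}
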
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
index 0fd8d2a0db97..192190c42fc8 100644
--- a/drivers/mtd/parsers/ofpart_core.c
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -57,20 +57,22 @@ static int parse_fixed_partitions(struct mtd_info *master,
if (!mtd_node)
return 0;
- ofpart_node = of_get_child_by_name(mtd_node, "partitions");
- if (!ofpart_node && !master->parent) {
- /*
- * We might get here even when ofpart isn't used at all (e.g.,
- * when using another parser), so don't be louder than
- * KERN_DEBUG
- */
- pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
- master->name, mtd_node);
+ if (!master->parent) { /* Master */
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ /*
+ * We might get here even when ofpart isn't used at all (e.g.,
+ * when using another parser), so don't be louder than
+ * KERN_DEBUG
+ */
+ pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
+ master->name, mtd_node);
+ ofpart_node = mtd_node;
+ dedicated = false;
+ }
+ } else { /* Partition */
ofpart_node = mtd_node;
- dedicated = false;
}
- if (!ofpart_node)
- return 0;
of_id = of_match_node(parse_ofpart_match_table, ofpart_node);
if (dedicated && !of_id) {
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index ba8e70a8e312..6b12ce822e51 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
break;
}
+ dev->base_addr = ioaddr;
+
/* Reserve any actual interrupt. */
if (dev->irq) {
retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
goto err_out;
}
- dev->base_addr = ioaddr;
-
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 20bbda1b36e1..c5a646d06102 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1526,6 +1526,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
slave->bond = bond;
slave->dev = slave_dev;
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
if (bond_kobj_init(slave))
return NULL;
@@ -1538,7 +1539,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
return NULL;
}
}
- INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
return slave;
}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index da6fffb4d5a8..4ffbfd534f18 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -269,9 +269,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- if (WARN_ON(!dev))
- return -EINVAL;
-
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
@@ -353,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
rtnl_lock();
result = register_netdevice(dev);
if (result) {
+ tty_kref_put(tty);
rtnl_unlock();
free_netdev(dev);
return -ENODEV;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 029e77dfa773..a45865bd7254 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -82,6 +82,8 @@ struct mcba_priv {
bool can_ka_first_pass;
bool can_speed_check;
atomic_t free_ctx_cnt;
+ void *rxbuf[MCBA_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
};
/* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
+ dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
}
buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
- GFP_KERNEL, &urb->transfer_dma);
+ GFP_KERNEL, &buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
if (err) {
usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
- buf, urb->transfer_dma);
+ buf, buf_dma);
usb_free_urb(urb);
break;
}
+ priv->rxbuf[i] = buf;
+ priv->rxbuf_dma[i] = buf_dma;
+
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
static void mcba_urb_unlink(struct mcba_priv *priv)
{
+ int i;
+
usb_kill_anchored_urbs(&priv->rx_submitted);
+
+ for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
+ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+ priv->rxbuf[i], priv->rxbuf_dma[i]);
+
usb_kill_anchored_urbs(&priv->tx_submitted);
}

diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9150038b60cb..3b018fcf4412 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -821,11 +821,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl;
+ u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
-
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -836,6 +834,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 55e5d479acce..854e25f43fa7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
},
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 96f7c9eede35..9b90f3d3a8f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1262,14 +1262,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- /* The real fabric path would be decided on the membership in the
- * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
- * means potential VLAN can be consisting of certain subset of all
- * ports.
- */
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
-
/* Trapped into security mode allows packet forwarding through VLAN
* table lookup. CPU port is set to fallback mode to let untagged
* frames pass through.
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2473bebe48e6..f966a253d1c7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1227,12 +1227,17 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
return -ERANGE;
- /* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set
- * guard band to be implemented for nonschedule queues to schedule
- * queues transition.
+ /* Enable guard band. The switch will schedule frames without taking
+ * their length into account. Thus we'll always need to enable the
+ * guard band which reserves the time of a maximum sized frame at the
+ * end of the time window.
+ *
+ * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
+ * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
+ * operate on the port number.
*/
- ocelot_rmw(ocelot,
- QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index b777d3f37573..12cd04b56803 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -167,9 +167,10 @@ enum sja1105_hostcmd {
SJA1105_HOSTCMD_INVALIDATE = 4,
};
+/* Command and entry overlap */
static void
-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
- enum packing_op op)
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
@@ -179,6 +180,20 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -641,7 +656,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
@@ -725,7 +740,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 405024b637d6..b88d9ef45a1f 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -26,6 +26,7 @@
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
static const struct dsa_switch_ops sja1105_switch_ops;
@@ -207,6 +208,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
default:
dev_err(dev, "Unsupported PHY mode %s!\n",
phy_modes(ports[i].phy_mode));
+ return -EINVAL;
}
/* Even though the SerDes port is able to drive SGMII autoneg
@@ -321,6 +323,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
return 0;
}
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
@@ -330,17 +339,13 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
- .vlanid = 1,
+ .vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- /* The static VLAN table will only contain the initial pvid of 1.
- * All other VLANs are to be configured through dynamic entries,
- * and kept in the static configuration table as backing memory.
- */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
@@ -353,9 +358,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
- /* VLAN 1: all DT-defined ports are members; no restrictions on
- * forwarding; always transmit as untagged.
- */
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_bridge_vlan *v;
@@ -366,15 +368,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
- /* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
- * transmitted as untagged.
- */
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
v->port = port;
- v->vid = 1;
+ v->vid = SJA1105_DEFAULT_VLAN;
v->untagged = true;
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
@@ -2817,11 +2816,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
struct sja1105_bridge_vlan *v;
- list_for_each_entry(v, vlan_list, list)
- if (v->port == port && v->vid == vid &&
- v->untagged == untagged && v->pvid == pvid)
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
/* Already added */
- return 0;
+ if (v->untagged == untagged && v->pvid == pvid)
+ /* Nothing changed */
+ return 0;
+
+ /* It's the same VLAN, but some of the flags changed
+ * and the user did not bother to delete it first.
+ * Update it and trigger sja1105_build_vlan_table.
+ */
+ v->untagged = untagged;
+ v->pvid = pvid;
+ return 1;
+ }
+ }
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
@@ -2976,13 +2986,13 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_static_config_load(priv, ports);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
- return rc;
+ goto out_ptp_clock_unregister;
}
/* Configure the CGU (PHY link modes and speeds) */
rc = sja1105_clocking_setup(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
- return rc;
+ goto out_static_config_free;
}
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
@@ -3003,7 +3013,7 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_devlink_setup(ds);
if (rc < 0)
- return rc;
+ goto out_static_config_free;
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
@@ -3012,6 +3022,17 @@ static int sja1105_setup(struct dsa_switch *ds)
rtnl_lock();
rc = sja1105_setup_8021q_tagging(ds, true);
rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
return rc;
}
@@ -3646,8 +3667,10 @@ static int sja1105_probe(struct spi_device *spi)
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
- if (!priv->cbs)
- return -ENOMEM;
+ if (!priv->cbs) {
+ rc = -ENOMEM;
+ goto out_unregister_switch;
+ }
}
/* Connections between dsa_port and sja1105_port */
@@ -3672,7 +3695,7 @@ static int sja1105_probe(struct spi_device *spi)
dev_err(ds->dev,
"failed to create deferred xmit thread: %d\n",
rc);
- goto out;
+ goto out_destroy_workers;
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
@@ -3682,7 +3705,8 @@ static int sja1105_probe(struct spi_device *spi)
}
return 0;
-out:
+
+out_destroy_workers:
while (port-- > 0) {
struct sja1105_port *sp = &priv->ports[port];
@@ -3691,6 +3715,10 @@ out:
kthread_destroy_worker(sp->xmit_worker);
}
+
+out_unregister_switch:
+ dsa_unregister_switch(ds);
+
return rc;
}
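
The sja1105_setup() and sja1105_probe() hunks convert early returns into a single unwind path, so every resource acquired before a failure is released in reverse order through labelled gotos. A generic, self-contained sketch of that error-handling ladder (all names here are placeholders, not the driver's):

struct example_priv { int a, b, c; };

static int acquire_a(struct example_priv *p) { p->a = 1; return 0; }
static void release_a(struct example_priv *p) { p->a = 0; }
static int acquire_b(struct example_priv *p) { p->b = 1; return 0; }
static void release_b(struct example_priv *p) { p->b = 0; }
static int acquire_c(struct example_priv *p) { p->c = 1; return 0; }

static int example_setup(struct example_priv *p)
{
	int rc;

	rc = acquire_a(p);
	if (rc)
		return rc;		/* nothing to undo yet */

	rc = acquire_b(p);
	if (rc)
		goto out_release_a;

	rc = acquire_c(p);
	if (rc)
		goto out_release_b;

	return 0;

out_release_b:			/* unwind in reverse acquisition order */
	release_b(p);
out_release_a:
	release_a(p);
	return rc;
}
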
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 881f88754bf6..52571486705e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
struct ena_tx_buffer *tx_info,
struct xdp_frame *xdpf,
- void **push_hdr,
- u32 *push_len)
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_adapter *adapter = xdp_ring->adapter;
struct ena_com_buf *ena_buf;
- dma_addr_t dma = 0;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
u32 size;
tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
size = tx_info->xdpf->len;
- ena_buf = tx_info->bufs;
- /* llq push buffer */
- *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
- *push_hdr = tx_info->xdpf->data;
+ if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
- if (size - *push_len > 0) {
+ if (size > 0) {
dma = dma_map_single(xdp_ring->dev,
- *push_hdr + *push_len,
- size - *push_len,
+ data,
+ size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
goto error_report_dma_error;
- tx_info->map_linear_data = 1;
- tx_info->num_of_bufs = 1;
- }
+ tx_info->map_linear_data = 0;
- ena_buf->paddr = dma;
- ena_buf->len = size;
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
return 0;
@@ -274,10 +286,6 @@ error_report_dma_error:
&xdp_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
- xdp_return_frame_rx_napi(tx_info->xdpf);
- tx_info->xdpf = NULL;
- tx_info->num_of_bufs = 0;
-
return -EINVAL;
}
@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info;
u16 next_to_use, req_id;
- void *push_hdr;
- u32 push_len;
int rc;
next_to_use = xdp_ring->next_to_use;
@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
tx_info = &xdp_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
+ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
return rc;
- ena_tx_ctx.ena_bufs = tx_info->bufs;
- ena_tx_ctx.push_header = push_hdr;
- ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
ena_tx_ctx.req_id = req_id;
- ena_tx_ctx.header_len = push_len;
rc = ena_xmit_common(dev,
xdp_ring,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index b3d74332ed33..7748b276e5fd 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1849,6 +1849,7 @@ out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
+ pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index c0986096c701..5bace8a93d73 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
-
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+ rc = -EPERM;
goto err_out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index d21f085044cd..27943b0446c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
/* SR-IOV capability was enabled but there are no VFs*/
- if (iov->total == 0)
+ if (iov->total == 0) {
+ err = -EINVAL;
goto failed;
+ }
iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2985844634c8..aef3fccc27a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
- idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+ idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -6932,17 +6933,10 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
- u8 pg_size = 0;
-
if (!rmem->nr_pages)
return;
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
-
- *pg_attr = pg_size;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
@@ -7314,7 +7308,7 @@ skip_rdma:
entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -10785,37 +10779,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+ u8 **nextp)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ int hdr_count = 0;
+ u8 *nexthdr;
+ int start;
+
+ /* Check that there are at most 2 IPv6 extension headers, no
+ * fragment header, and each is <= 64 bytes.
+ */
+ start = nw_off + sizeof(*ip6h);
+ nexthdr = &ip6h->nexthdr;
+ while (ipv6_ext_hdr(*nexthdr)) {
+ struct ipv6_opt_hdr *hp;
+ int hdrlen;
+
+ if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+ *nexthdr == NEXTHDR_FRAGMENT)
+ return false;
+ hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+ skb_headlen(skb), NULL);
+ if (!hp)
+ return false;
+ if (*nexthdr == NEXTHDR_AUTH)
+ hdrlen = ipv6_authlen(hp);
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ if (hdrlen > 64)
+ return false;
+ nexthdr = &hp->nexthdr;
+ start += hdrlen;
+ hdr_count++;
+ }
+ if (nextp) {
+ /* Caller will check inner protocol */
+ if (skb->encapsulation) {
+ *nextp = nexthdr;
+ return true;
+ }
+ *nextp = NULL;
+ }
+ /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+ return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ __be16 udp_port = uh->dest;
+
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ return false;
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ struct ethhdr *eh = inner_eth_hdr(skb);
+
+ switch (eh->h_proto) {
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ return bnxt_exthdr_check(bp, skb,
+ skb_inner_network_offset(skb),
+ NULL);
+ }
+ }
+ return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ return bnxt_udp_tunl_check(bp, skb);
+ case IPPROTO_IPIP:
+ return true;
+ case IPPROTO_GRE: {
+ switch (skb->inner_protocol) {
+ default:
+ return false;
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ fallthrough;
+ }
+ }
+ case IPPROTO_IPV6:
+ /* Check ext headers of inner ipv6 */
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
+ }
+ return false;
+}
+
static netdev_features_t bnxt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- struct bnxt *bp;
- __be16 udp_port;
- u8 l4_proto = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u8 *l4_proto;
features = vlan_features_check(skb, features);
- if (!skb->encapsulation)
- return features;
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
- l4_proto = ip_hdr(skb)->protocol;
+ if (!skb->encapsulation)
+ return features;
+ l4_proto = &ip_hdr(skb)->protocol;
+ if (bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
case htons(ETH_P_IPV6):
- l4_proto = ipv6_hdr(skb)->nexthdr;
+ if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+ &l4_proto))
+ break;
+ if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
- default:
- return features;
}
-
- if (l4_proto != IPPROTO_UDP)
- return features;
-
- bp = netdev_priv(dev);
- /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
- udp_port = udp_hdr(skb)->dest;
- if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
- return features;
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
@@ -11668,6 +11750,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
bnxt_hwrm_coal_params_qcaps(bp);
}
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
static int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -11682,6 +11766,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
netdev_err(bp->dev, "Firmware init phase 2 failed\n");
return rc;
}
+ rc = bnxt_probe_phy(bp, false);
+ if (rc)
+ return rc;
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
@@ -13073,6 +13160,7 @@ init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98e0cef4532c..30e47ea343f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,16 @@ struct bnxt_ctx_pg_info {
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6bc7d41d519b..a0c7b1167dbb 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2867,6 +2867,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->dev->stats;
+ if (!netif_running(bp->dev))
+ return nstat;
+
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7c5af4beedc6..591229b96257 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1153,7 +1153,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
@@ -1161,15 +1161,15 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
if (!sc) {
netif_info(lio, rx_err, lio->netdev,
- "Failed to allocate octeon_soft_command\n");
- return;
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
}
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1192,18 +1192,19 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc);
- return;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -1778,6 +1779,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (oct->props[lio->ifidx].napi_enabled == 0) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -1813,7 +1815,9 @@ static int liquidio_open(struct net_device *netdev)
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,7 +1828,7 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
- return 0;
+ return ret;
}
/**
@@ -1838,6 +1842,7 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -1854,7 +1859,9 @@ static int liquidio_stop(struct net_device *netdev)
lio->link_changes++;
/* Tell Octeon that nic interface is down. */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
if (OCTEON_CN23XX_PF(oct)) {
if (!oct->msix_on)
@@ -1889,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 516f166ceff8..ffddb3126a32 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -595,7 +595,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
struct octeon_soft_command *sc;
@@ -603,11 +603,16 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -635,11 +640,13 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -906,6 +913,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (!oct->props[lio->ifidx].napi_enabled) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -932,11 +940,13 @@ static int liquidio_open(struct net_device *netdev)
(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
- return 0;
+ return ret;
}
/**
@@ -950,9 +960,12 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
/* tell Octeon to stop forwarding packets to host */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
/* Inform that netif carrier is down */
@@ -986,7 +999,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 314f8d806723..9058f09f921e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 61ea3ec5c3fc..83ed10ac8660 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
return ret;
}
- spin_lock_bh(&adap->win0_lock);
+ /* We have to RESET the chip/firmware because we need the
+ * chip in an uninitialized state for loading a new PHY image.
+ * Otherwise, the running firmware will only store the PHY
+ * image in local RAM, which will be lost after the next reset.
+ */
+ ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+ ret);
+ return ret;
+ }
+
ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
- spin_unlock_bh(&adap->win0_lock);
- if (ret)
- dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+ ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
u32 ftid)
{
struct tid_info *t = &adap->tids;
- struct filter_entry *f;
- if (ftid < t->nhpftids)
- f = &adap->tids.hpftid_tab[ftid];
- else if (ftid < t->nftids)
- f = &adap->tids.ftid_tab[ftid - t->nhpftids];
- else
- f = lookup_tid(&adap->tids, ftid);
+ if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+ return &t->hpftid_tab[ftid - t->hpftid_base];
- return f;
+ if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+ return &t->ftid_tab[ftid - t->ftid_base];
+
+ return lookup_tid(t, ftid);
}
static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
filter_id = filter_info->loc_array[cmd->fs.location];
f = cxgb4_get_filter_entry(adapter, filter_id);
+ if (f->fs.prio)
+ filter_id -= adapter->tids.hpftid_base;
+ else if (!f->fs.hash)
+ filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
if (ret)
goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
filter_info = &adapter->ethtool_filters->port[pi->port_id];
+ if (fs.prio)
+ tid += adapter->tids.hpftid_base;
+ else if (!fs.hash)
+ tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
filter_info->loc_array[cmd->fs.location] = tid;
set_bit(cmd->fs.location, filter_info->bmap);
filter_info->in_use++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index bc581b149b11..6260b3bebd2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
- (u64)f->fs.nat_lip[0] << 25, 1);
+ (u64)f->fs.nat_lip[0] << 24, 1);
}
}
@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
cxgb4_del_filter(dev, f->tid, &f->fs);
}
- sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ sb = adapter->tids.stid_base;
for (i = 0; i < sb; i++) {
f = (struct filter_entry *)adapter->tids.tid_tab[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6264bc66a4fc..762113a04dde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
return err;
}
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
/* Load PHY Firmware onto adapter.
*/
- spin_lock_bh(&adap->win0_lock);
ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
(u8 *)phyf->data, phyf->size);
- spin_unlock_bh(&adap->win0_lock);
if (ret < 0)
dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
-ret);
@@ -6480,9 +6478,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
direction);
- cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
mutex_unlock(&uld_mutex);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 1b88bd1c2dbe..dd9be229819a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
if (!ch_flower)
return -ENOENT;
+ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+ adap->flower_ht_params);
+
ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
&ch_flower->fs, ch_flower->filter_id);
if (ret)
- goto err;
+ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+ ch_flower->filter_id, ret);
- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
- adap->flower_ht_params);
- if (ret) {
- netdev_err(dev, "Flow remove from rhashtable failed");
- goto err;
- }
kfree_rcu(ch_flower, rcu);
-
-err:
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 6c259de96f96..338b04f339b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
* down before configuring tc params.
*/
if (netif_running(dev)) {
- cxgb_close(dev);
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
needs_bring_up = true;
}
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
}
out:
- if (needs_bring_up)
- cxgb_open(dev);
+ if (needs_bring_up) {
+ netif_tx_start_all_queues(dev);
+ netif_carrier_on(dev);
+ }
mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e5f2edb70cf..6a099cb34b12 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
if (!eosw_txq)
return -ENOMEM;
+ if (!(adap->flags & CXGB4_FW_OK)) {
+ /* Don't stall caller when access to FW is lost */
+ complete(&eosw_txq->completion);
+ return -EIO;
+ }
+
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9428ef1f04a8..a0555f4d76fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
* @addr: the start address to write
* @n: length of data to write in bytes
* @data: the data to write
+ * @byte_oriented: whether to store data as bytes or as words
*
* Writes up to a page of data (256 bytes) to the serial flash starting
* at the given address. All the data must be written to the same page.
+ * If @byte_oriented is set, the write data is stored as a byte stream
+ * (i.e. it matches what is on disk), otherwise in big-endian.
*/
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
- unsigned int n, const u8 *data)
+ unsigned int n, const u8 *data, bool byte_oriented)
{
- int ret;
- u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
+ u32 buf[64];
+ int ret;
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
goto unlock;
- for (left = n; left; left -= c) {
+ for (left = n; left; left -= c, data += c) {
c = min(left, 4U);
- for (val = 0, i = 0; i < c; ++i)
- val = (val << 8) + *data++;
+ for (val = 0, i = 0; i < c; ++i) {
+ if (byte_oriented)
+ val = (val << 8) + data[i];
+ else
+ val = (val << 8) + data[c - i - 1];
+ }
ret = sf1_write(adapter, c, c != left, 1, val);
if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
- ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+ byte_oriented);
if (ret)
return ret;
@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
if (ret)
goto out;
@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
if (ret)
goto out;
}
- ret = t4_write_flash(adap,
- fw_start + offsetof(struct fw_hdr, fw_ver),
- sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+ ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+ true);
out:
if (ret)
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
/* Copy the supplied PHY Firmware image to the adapter memory location
* allocated by the adapter firmware.
*/
+ spin_lock_bh(&adap->win0_lock);
ret = t4_memory_rw(adap, win, mtype, maddr,
phy_fw_size, (__be32 *)phy_fw_data,
T4_MEMORY_WRITE);
+ spin_unlock_bh(&adap->win0_lock);
if (ret)
return ret;
@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
n = size - i;
else
n = SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, true);
if (ret)
goto out;
@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
boot_data += SF_PAGE_SIZE;
- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+ false);
if (ret)
goto out;
}
ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
- (const u8 *)header);
+ (const u8 *)header, false);
out:
if (ret)
@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < size; i += SF_PAGE_SIZE) {
n = min_t(u32, size - i, SF_PAGE_SIZE);
- ret = t4_write_flash(adap, addr, n, cfg_data);
+ ret = t4_write_flash(adap, addr, n, cfg_data, false);
if (ret)
goto out;
@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
for (i = 0; i < npad; i++) {
u8 data = 0;
- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+ ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+ false);
if (ret)
goto out;
}
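
The only behavioural difference the new @byte_oriented flag makes in t4_write_flash() is how each group of up to four bytes is packed into the 32-bit word sent to the flash: byte-oriented preserves the incoming byte order, while the word-oriented path reverses it (big-endian words). A small stand-alone illustration of the packing loop above:

#include <stdint.h>
#include <stdio.h>

/* Host-side illustration of the val-assembly loop in t4_write_flash(). */
static uint32_t pack(const uint8_t *data, unsigned int c, int byte_oriented)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < c; i++)
		val = (val << 8) + (byte_oriented ? data[i] : data[c - i - 1]);
	return val;
}

int main(void)
{
	const uint8_t d[4] = { 0x11, 0x22, 0x33, 0x44 };

	printf("byte oriented: %08x\n", pack(d, 4, 1));	/* 11223344 */
	printf("word oriented: %08x\n", pack(d, 4, 0));	/* 44332211 */
	return 0;
}
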
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index ef3f1e92632f..59683f79959c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
}
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+static void clear_conn_resources(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
* @tx_info - driver specific tls info.
@@ -364,10 +365,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_uld_ctx *u_ctx;
if (!tx_info)
return;
+ u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx && u_ctx->detach)
+ return;
/* clear l2t entry */
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
@@ -384,6 +389,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->tid != -1) {
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
}
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -411,6 +418,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
struct adapter *adap;
@@ -425,6 +433,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_open);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
@@ -434,6 +443,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (tx_ctx->chcr_info)
goto out;
+ if (u_ctx && u_ctx->detach)
+ goto out;
+
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
if (!tx_info)
goto out;
@@ -569,6 +581,8 @@ free_tid:
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
+
put_module:
/* release module refcount */
module_put(THIS_MODULE);
@@ -633,8 +647,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
+ struct tls_context *tls_ctx;
struct tid_info *t;
+ int ret = 0;
tid = GET_TID(p);
status = AOPEN_STATUS_G(ntohl(p->atid_status));
@@ -666,14 +684,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+ /* Adding tid */
+ tls_ctx = tls_get_ctx(tx_info->sk);
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx) {
+ ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
+ GFP_NOWAIT);
+ if (ret < 0) {
+ pr_err("%s: Failed to allocate tid XA entry = %d\n",
+ __func__, tx_info->tid);
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+ goto out;
+ }
+ }
tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
} else {
tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+out:
spin_unlock(&tx_info->lock);
complete(&tx_info->completion);
- return 0;
+ return ret;
}
/*
@@ -2090,6 +2123,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
goto out;
}
u_ctx->lldi = *lldi;
+ u_ctx->detach = false;
+ xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
out:
return u_ctx;
}
@@ -2123,6 +2158,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+static void clear_conn_resources(struct chcr_ktls_info *tx_info)
+{
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+
+ /* clear tid */
+ if (tx_info->tid != -1)
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+}
+
+static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
+{
+ struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ unsigned long index;
+
+ xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
+ tx_info = tx_ctx->chcr_info;
+ clear_conn_resources(tx_info);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
+ }
+}
+
static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct chcr_ktls_uld_ctx *u_ctx = handle;
@@ -2139,7 +2213,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_DETACH:
pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
mutex_lock(&dev_mutex);
+ u_ctx->detach = true;
list_del(&u_ctx->entry);
+ ch_ktls_reset_all_conn(u_ctx);
+ xa_destroy(&u_ctx->tid_list);
mutex_unlock(&dev_mutex);
break;
default:
@@ -2178,6 +2255,7 @@ static void __exit chcr_ktls_exit(void)
adap = pci_get_drvdata(u_ctx->lldi.pdev);
memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
list_del(&u_ctx->entry);
+ xa_destroy(&u_ctx->tid_list);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 18b3b1f02415..10572dc55365 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
+ struct xarray tid_list;
+ bool detach;
};
static inline struct chcr_ktls_ofld_ctx_tx *
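For readers unfamiliar with the XArray bookkeeping the ch_ktls hunks above rely on, here is a minimal, self-contained sketch of the same pattern: an array keyed by hardware TID, filled from bottom-half context and drained when the device detaches. This is an illustrative aside, not part of the patch; struct conn and the conn_* helpers are invented for the example.

#include <linux/xarray.h>
#include <linux/slab.h>

struct conn {
        int tid;
};

/* XA_FLAGS_LOCK_BH: the array lock is taken with BHs disabled, matching
 * insertion from the CPL completion path above.
 */
static DEFINE_XARRAY_FLAGS(tid_list, XA_FLAGS_LOCK_BH);

static int conn_track(struct conn *c)
{
        /* fails with -EBUSY if the TID is already present */
        return xa_insert_bh(&tid_list, c->tid, c, GFP_NOWAIT);
}

static void conn_untrack(struct conn *c)
{
        xa_erase(&tid_list, c->tid);
}

static void conn_reset_all(void)
{
        struct conn *c;
        unsigned long index;

        /* walk every tracked connection, e.g. on CXGB4_STATE_DETACH */
        xa_for_each(&tid_list, index, c)
                kfree(c);
        xa_destroy(&tid_list);
}

Because xa_insert_bh() refuses to overwrite an occupied index, a duplicate TID surfaces as an error instead of silently replacing the previous entry, which mirrors how the act-open reply handler above treats a failed insert as an open failure.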
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 188d871f6b8c..c320cc8ca68d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1564,8 +1564,10 @@ found_ok_skb:
cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
sizeof(thdr->type), &thdr->type);
- if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
- return -EIO;
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
+ copied = -EIO;
+ break;
+ }
/* don't send tls header, skip copy */
goto skip_copy;
}
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46b0dbab8aad..7c992172933b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
struct ec_bhf_priv *priv = netdev_priv(net_dev);
unregister_netdev(net_dev);
- free_netdev(net_dev);
pci_iounmap(dev, priv->dma_io);
pci_iounmap(dev, priv->io);
+
+ free_netdev(net_dev);
+
pci_release_regions(dev);
pci_clear_master(dev);
pci_disable_device(dev);
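As an aside (not part of the patch), the ec_bhf hunk above is an ordering fix: the private area returned by netdev_priv() lives inside the net_device allocation, so it must not be dereferenced once free_netdev() has run. A minimal sketch of the corrected teardown, with my_priv and my_remove invented for illustration:

#include <linux/pci.h>
#include <linux/netdevice.h>

struct my_priv {
        void __iomem *io;
};

static void my_remove(struct pci_dev *dev)
{
        struct net_device *net_dev = pci_get_drvdata(dev);
        struct my_priv *priv = netdev_priv(net_dev);

        unregister_netdev(net_dev);

        pci_iounmap(dev, priv->io);     /* priv is still valid here */

        free_netdev(net_dev);           /* frees priv along with the netdev */

        pci_release_regions(dev);
        pci_disable_device(dev);
}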
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b6eba29d8e99..7968568bbe21 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5897,6 +5897,7 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
+ pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f2065f9d02e6..ad82cffc6f3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1662,7 +1662,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
}
/* ------------------------------------------------------------------------- */
-static void fec_get_mac(struct net_device *ndev)
+static int fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -1685,6 +1685,8 @@ static void fec_get_mac(struct net_device *ndev)
ret = of_get_mac_address(np, tmpaddr);
if (!ret)
iap = tmpaddr;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
}
}
@@ -1723,7 +1725,7 @@ static void fec_get_mac(struct net_device *ndev)
eth_hw_addr_random(ndev);
dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
ndev->dev_addr);
- return;
+ return 0;
}
memcpy(ndev->dev_addr, iap, ETH_ALEN);
@@ -1731,6 +1733,8 @@ static void fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+
+ return 0;
}
/* ------------------------------------------------------------------------- */
@@ -3290,7 +3294,9 @@ static int fec_enet_init(struct net_device *ndev)
return ret;
}
- fec_enet_alloc_queue(ndev);
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
@@ -3298,11 +3304,15 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_queue_mem;
}
/* Get the Ethernet address */
- fec_get_mac(ndev);
+ ret = fec_get_mac(ndev);
+ if (ret)
+ goto free_queue_mem;
+
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
@@ -3376,6 +3386,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1753807cbf97..d71eac7e1924 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
- if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ if (!fep->cycle_speed) {
+ fep->cycle_speed = NSEC_PER_SEC;
+ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+ }
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
spin_lock_init(&fep->tmreg_lock);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7b7a4aace79..b0c0504950d8 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -548,8 +548,8 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
if (!base) {
- pcmcia_release_window(link, link->resource[2]);
- return -ENOMEM;
+ pcmcia_release_window(link, link->resource[2]);
+ return -1;
}
pcmcia_map_mem_page(link, link->resource[2], 0);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7302498c6df3..bbc423e93122 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
/* Double check we have no extra work.
* Ensure unmask synchronizes with checking for work.
*/
- dma_rmb();
+ mb();
if (block->tx)
reschedule |= gve_tx_poll(block, -1);
if (block->rx)
@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int vecs_left = new_num_ntfy_blks % 2;
priv->num_ntfy_blks = new_num_ntfy_blks;
+ priv->mgmt_msix_idx = priv->num_ntfy_blks;
priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
vecs_per_type);
priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
@@ -300,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (priv->msix_vectors) {
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
}
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
priv->ntfy_blocks = NULL;
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 6938f3a939d6..3e04a3973d68 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -212,10 +212,11 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
-
+ if (!tx->tx_fifo.qpl)
+ goto abort_with_desc;
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
- goto abort_with_desc;
+ goto abort_with_qpl;
}
tx->q_resources =
@@ -236,6 +237,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_qpl:
+ if (!tx->raw_addressing)
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -589,7 +593,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
struct gve_tx_ring *tx;
int nsegs;
- WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
+ WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
if (unlikely(gve_maybe_stop_tx(tx, skb))) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index da48c05435ea..7e62dcff2426 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -192,7 +192,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
}
/**
- *hns_nic_set_link_settings - implement ethtool set link ksettings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
*@cmd: ethtool_link_ksettings
*retuen 0 - success , negative --fail
@@ -827,7 +827,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
}
/**
- * get_ethtool_stats - get detail statistics.
+ * hns_get_ethtool_stats - get detail statistics.
* @netdev: net device
* @stats: statistics info.
* @data: statistics data.
@@ -885,7 +885,7 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
}
/**
- * get_strings: Return a set of strings that describe the requested objects
+ * hns_get_strings: Return a set of strings that describe the requested objects
* @netdev: net device
* @stringset: string set ID.
* @data: objects data.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 783fdaf8f8d6..026558f8e04b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -264,22 +264,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
- /* initialize the configuration for interrupt coalescing.
- * 1. GL (Interrupt Gap Limiter)
- * 2. RL (Interrupt Rate Limiter)
- * 3. QL (Interrupt Quantity Limiter)
- *
- * Default: enable interrupt coalescing self-adaptive and GL
- */
- tx_coal->adapt_enable = 1;
- rx_coal->adapt_enable = 1;
+ tx_coal->adapt_enable = ptx_coal->adapt_enable;
+ rx_coal->adapt_enable = prx_coal->adapt_enable;
- tx_coal->int_gl = HNS3_INT_GL_50K;
- rx_coal->int_gl = HNS3_INT_GL_50K;
+ tx_coal->int_gl = ptx_coal->int_gl;
+ rx_coal->int_gl = prx_coal->int_gl;
- rx_coal->flow_level = HNS3_FLOW_LOW;
- tx_coal->flow_level = HNS3_FLOW_LOW;
+ rx_coal->flow_level = prx_coal->flow_level;
+ tx_coal->flow_level = ptx_coal->flow_level;
/* device version above V3(include V3), GL can configure 1us
* unit, so uses 1us unit.
@@ -294,8 +289,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
rx_coal->ql_enable = 1;
tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
- tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
- rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ tx_coal->int_ql = ptx_coal->int_ql;
+ rx_coal->int_ql = prx_coal->int_ql;
}
}
@@ -846,8 +841,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
l4.udp->dest == htons(4790))))
return false;
- skb_checksum_help(skb);
-
return true;
}
@@ -924,8 +917,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -970,7 +962,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
- break;
+ return skb_checksum_help(skb);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -995,8 +987,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
return 0;
@@ -3844,6 +3835,34 @@ map_ring_fail:
return ret;
}
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
+ */
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
+
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
+}
+
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -4295,6 +4314,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_get_ring_cfg;
}
+ hns3_nic_init_coal_cfg(priv);
+
ret = hns3_nic_alloc_vector_data(priv);
if (ret) {
ret = -ENOMEM;
@@ -4317,12 +4338,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ret)
goto out_init_phy;
- ret = register_netdev(netdev);
- if (ret) {
- dev_err(priv->dev, "probe register netdev fail!\n");
- goto out_reg_netdev_fail;
- }
-
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
@@ -4355,17 +4370,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
if (netif_msg_drv(handle))
hns3_info_show(priv);
return ret;
+out_reg_netdev_fail:
+ hns3_dbg_uninit(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
out_init_irq_fail:
- unregister_netdev(netdev);
-out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
@@ -4571,31 +4592,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
return 0;
}
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
- /* ethtool only support setting and querying one coal
- * configuration for now, so save the vector 0' coal
- * configuration here in order to restore it.
- */
- memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
- sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
- u16 vector_num = priv->vector_num;
- int i;
-
- for (i = 0; i < vector_num; i++) {
- memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
- sizeof(struct hns3_enet_coalesce));
- }
-}
-
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4654,8 +4650,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_put_ring;
- hns3_restore_coal(priv);
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_dealloc_vector;
@@ -4721,8 +4715,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_uninit_vector_data(priv);
- hns3_store_coal(priv);
-
hns3_nic_dealloc_vector_data(priv);
hns3_uninit_all_ring(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b48faf769b1c..c1ea403d2b56 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1134,50 +1134,32 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
- struct ethtool_coalesce *cmd)
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
{
- struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
struct hnae3_handle *h = priv->ae_handle;
- u16 queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev))
return -EBUSY;
- if (queue >= queue_num) {
- netdev_err(netdev,
- "Invalid queue value %u! Queue max id=%u\n",
- queue, queue_num - 1);
- return -EINVAL;
- }
-
- tx_vector = priv->ring[queue].tqp_vector;
- rx_vector = priv->ring[queue_num + queue].tqp_vector;
+ cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
- cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.adapt_enable;
- cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.adapt_enable;
-
- cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
- cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+ cmd->tx_coalesce_usecs = tx_coal->int_gl;
+ cmd->rx_coalesce_usecs = rx_coal->int_gl;
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
- cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
- cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+ cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+ cmd->rx_max_coalesced_frames = rx_coal->int_ql;
return 0;
}
-static int hns3_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
-{
- return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1292,19 +1274,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
- ret = hns3_check_ql_coalesce_param(netdev, cmd);
- if (ret)
- return ret;
-
- if (cmd->use_adaptive_tx_coalesce == 1 ||
- cmd->use_adaptive_rx_coalesce == 1) {
- netdev_info(netdev,
- "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
- cmd->use_adaptive_tx_coalesce,
- cmd->use_adaptive_rx_coalesce);
- }
-
- return 0;
+ return hns3_check_ql_coalesce_param(netdev, cmd);
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1320,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
u16 queue_num = h->kinfo.num_tqps;
int ret;
int i;
@@ -1364,6 +1337,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
h->kinfo.int_rl_setting =
hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_coal->int_gl = cmd->tx_coalesce_usecs;
+ rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+ tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+ rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 8e5f9dc8791d..f1c9f4ada348 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -710,7 +710,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
unsigned int flag;
int ret = 0;
- memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +737,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
trace_hclge_pf_mbx_get(hdev, req);
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index de70c16ef619..b883ab809df3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = I40E_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
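Several Intel hunks in this series (i40e, ice, igb, igc, ixgbe) converge on the same XDP verdict-handling shape: a failed XDP_TX or XDP_REDIRECT jumps to a shared out_failure label so the xdp_exception tracepoint fires before the frame is dropped. The sketch below shows that shape in isolation; it is illustrative only, and struct my_ring, my_xmit_xdp() and the MY_XDP_* result codes are stand-ins for each driver's own definitions.

#include <linux/filter.h>
#include <linux/bits.h>
#include <trace/events/xdp.h>

#define MY_XDP_PASS     0
#define MY_XDP_CONSUMED BIT(0)
#define MY_XDP_TX       BIT(1)
#define MY_XDP_REDIR    BIT(2)

struct my_ring {
        struct net_device *netdev;
};

/* stand-in for the driver's descriptor-ring transmit path */
static int my_xmit_xdp(struct my_ring *rx_ring, struct xdp_buff *xdp)
{
        return MY_XDP_TX;       /* MY_XDP_CONSUMED when the ring is full */
}

static int my_run_xdp(struct my_ring *rx_ring, struct xdp_buff *xdp,
                      struct bpf_prog *xdp_prog)
{
        int result = MY_XDP_PASS;
        u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                result = my_xmit_xdp(rx_ring, xdp);
                if (result == MY_XDP_CONSUMED)
                        goto out_failure;
                break;
        case XDP_REDIRECT:
                if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
                        goto out_failure;
                result = MY_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;    /* failures are handled by dropping */
        case XDP_DROP:
                result = MY_XDP_CONSUMED;
                break;
        }
        return result;
}

The point of the shared label is that every failure path, not only XDP_ABORTED, is now visible to tracing before the packet is counted as consumed.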
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 46d884417c63..68f177a86403 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return I40E_XDP_REDIR;
}
switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index e35db3ff583b..2924c67567b8 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -335,6 +335,7 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_ring **xdp_rings; /* XDP ring array */
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -547,15 +548,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
+ struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
- qid -= ring->vsi->num_xdp_txq;
+ qid -= vsi->num_xdp_txq;
- if (!ice_is_xdp_ena_vsi(ring->vsi))
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
- return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
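As an aside, the new af_xdp_zc_qps member above is plain bitmap bookkeeping: one bit per queue pair, set when an AF_XDP zero-copy pool is bound to that queue and tested in the hot path by ice_xsk_pool(). A minimal sketch of that pattern follows, not part of the patch, with the my_vsi_* names invented for illustration.

#include <linux/bitmap.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct my_vsi {
        unsigned long *af_xdp_zc_qps;
        u16 num_queues;
};

static int my_vsi_alloc(struct my_vsi *vsi)
{
        vsi->af_xdp_zc_qps = bitmap_zalloc(vsi->num_queues, GFP_KERNEL);
        return vsi->af_xdp_zc_qps ? 0 : -ENOMEM;
}

static void my_vsi_free(struct my_vsi *vsi)
{
        bitmap_free(vsi->af_xdp_zc_qps);
        vsi->af_xdp_zc_qps = NULL;
}

/* called when a zero-copy pool is bound to / unbound from queue qid */
static void my_vsi_set_zc(struct my_vsi *vsi, u16 qid, bool on)
{
        if (on)
                set_bit(qid, vsi->af_xdp_zc_qps);
        else
                clear_bit(qid, vsi->af_xdp_zc_qps);
}

/* fast-path check, mirroring the test_bit() added to ice_xsk_pool() above */
static bool my_vsi_has_zc(struct my_vsi *vsi, u16 qid)
{
        return test_bit(qid, vsi->af_xdp_zc_qps);
}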
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index d9ddd0bcf65f..99301ad95290 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
-
- /* Autoneg PHY types */
- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ks->base.port = PORT_TP;
break;
case ICE_MEDIA_BACKPLANE:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Backplane);
ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ /* Set supported and advertised autoneg */
+ if (ice_is_phy_caps_an_enabled(caps)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ }
+
done:
kfree(caps);
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index de38a0fc9665..9b8300d4a267 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -31,6 +31,7 @@
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 82e2ce23df3d..27f9dac8719c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
+ vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_zc_qps;
+
return 0;
+err_zc_qps:
+ devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -194,6 +200,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
+ if (vf->num_req_qs)
+ vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +296,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
+ if (vsi->af_xdp_zc_qps) {
+ bitmap_free(vsi->af_xdp_zc_qps);
+ vsi->af_xdp_zc_qps = NULL;
+ }
/* free the ring and vector containers */
if (vsi->q_vectors) {
devm_kfree(dev, vsi->q_vectors);
@@ -1705,12 +1717,13 @@ setup_rings:
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1722,7 +1735,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
qg_buf->num_txqs = 1;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
goto err_cfg_txqs;
@@ -1742,7 +1755,7 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -1757,7 +1770,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
int ret;
int i;
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
if (ret)
return ret;
@@ -1997,17 +2010,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
* @rst_src: reset source
* @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings)
+ u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
{
u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ for (q_idx = 0; q_idx < count; q_idx++) {
struct ice_txq_meta txq_meta = { };
int status;
@@ -2035,7 +2049,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
}
/**
@@ -2044,7 +2058,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
*/
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4ee85a217c6f..0eb2307325d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2556,6 +2556,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
/**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+ struct netdev_bpf *xdp)
+{
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Please provide working DDP firmware package in order to use XDP\n"
+ "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_xdp - implements XDP handler
* @dev: netdevice
* @xdp: XDP command
@@ -6937,6 +6951,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp_safe_mode,
};
static const struct net_device_ops ice_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index e2b4b29ea207..04748aa4c7c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct ice_ring *xdp_ring;
- int err;
+ int err, result;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
return ICE_XDP_PASS;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- return ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
+ return result;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ return ICE_XDP_REDIR;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -2143,6 +2149,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
+ struct ethhdr *eth;
unsigned int count;
int tso, csum;
@@ -2189,7 +2196,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
goto out_drop;
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
vsi->type == ICE_VSI_PF &&
vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a1d22d2aa0bd..97a46c616aca 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
- * in the case of VFR. If this is done for PFR, it can mess up VF
- * resets because the VF driver may already have started cleanup
- * by the time we get here.
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
*/
- if (!is_pfr)
+ if (!is_pfr) {
wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+ }
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -1698,7 +1700,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_vf_ctrl_vsi_release(vf);
ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi_with_release(vf);
+
+ if (ice_vf_rebuild_vsi_with_release(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+ return false;
+ }
+
ice_vf_post_vsi_rebuild(vf);
/* if the VF has been reset allow it to come up again */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index faa7b8d96adb..a1f89ea3c2bd 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
+ clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
return 0;
}
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return ICE_XDP_REDIR;
}
switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7bda8c5edea5..2d3daf022651 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb);
+ ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 038a9fd1af44..b2a042f825ff 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8280,7 +8280,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8300,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
- xdp->data += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* Determine available headroom for copy */
headlen = size;
@@ -8336,7 +8332,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8359,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- /* pull timestamp out of packet data */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
- __skb_pull(skb, IGB_TS_HDR_LEN);
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -8401,18 +8394,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
break;
case XDP_TX:
result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IGB_XDP_REDIR;
- else
- result = IGB_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IGB_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -8682,7 +8677,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer;
+ ktime_t timestamp = 0;
+ int pkt_offset = 0;
unsigned int size;
+ void *pktbuf;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8700,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
dma_rmb();
rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ pktbuf, &timestamp);
+
+ pkt_offset += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
/* retrieve a buffer from the ring */
if (!skb) {
- unsigned int offset = igb_rx_offset(rx_ring);
- unsigned char *hard_start;
+ unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+ unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
- hard_start = page_address(rx_buffer->page) +
- rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8740,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
} else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+ timestamp);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- &xdp, rx_desc);
+ &xdp, timestamp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ba61fe9bfaf4..d68cd4466a54 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
dev_kfree_skb_any(skb);
}
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
*
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8
*
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
**/
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb)
+ ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
- return IGB_RET_PTP_DISABLED;
+ return 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
/* check reserved dwords are zero, be/le doesn't matter for zero */
if (regval[0])
- return IGB_RET_PTP_INVALID;
+ return 0;
- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
- le64_to_cpu(regval[1]));
+ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
break;
}
}
- skb_hwtstamps(skb)->hwtstamp =
- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
- return 0;
+ *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+ return IGB_TS_HDR_LEN;
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 069471b7ffb0..f1adf154ec4a 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2047,20 +2047,19 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
break;
case XDP_TX:
if (igc_xdp_xmit_back(adapter, xdp) < 0)
- res = IGC_XDP_CONSUMED;
- else
- res = IGC_XDP_TX;
+ goto out_failure;
+ res = IGC_XDP_TX;
break;
case XDP_REDIRECT:
if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
- res = IGC_XDP_CONSUMED;
- else
- res = IGC_XDP_REDIRECT;
+ goto out_failure;
+ res = IGC_XDP_REDIRECT;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c5ec17d19c59..2ac5b82676f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IXGBE_XDP_REDIR;
- else
- result = IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IXGBE_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 988db46bff0e..214a38de3f41 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 91ad5b902673..f72d2978263b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -106,9 +106,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return IXGBE_XDP_REDIR;
}
switch (act) {
@@ -116,16 +117,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ba2ed8a43d2d..0e733cc15c58 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
case XDP_TX:
xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ if (result == IXGBEVF_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6f987a7ffcb3..b30a45725374 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1315,23 +1315,23 @@ static int korina_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq_byname(pdev, "tx");
p = devm_platform_ioremap_resource_byname(pdev, "emac");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->eth_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->rx_dma_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->tx_dma_regs = p;
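The korina fix above hinges on an API contract worth restating: devm_platform_ioremap_resource_byname() reports failure with an ERR_PTR(), never NULL, so callers must check IS_ERR() and propagate PTR_ERR(). A minimal probe sketch illustrating the check (my_probe is invented; "emac" matches the resource name used in the hunk above):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int my_probe(struct platform_device *pdev)
{
        void __iomem *regs;

        regs = devm_platform_ioremap_resource_byname(pdev, "emac");
        if (IS_ERR(regs))
                return PTR_ERR(regs);   /* never NULL on failure */

        /* ... program the mapped registers ... */
        return 0;
}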
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41c2ad210bc9..21ef2f128070 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -154,6 +154,8 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+ dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
@@ -163,16 +165,18 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
- ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev,
- ch->dma.desc_base[ch->dma.desc].addr))) {
+ mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+ XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
goto skip;
}
+ ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ /* Make sure the address is written before we give it to HW */
+ wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
@@ -196,6 +200,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
+ net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
}
@@ -348,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
struct xrx200_chan *ch = ptr;
if (napi_schedule_prep(&ch->napi)) {
- __napi_schedule(&ch->napi);
ltq_dma_disable_irq(&ch->dma);
+ __napi_schedule(&ch->napi);
}
ltq_dma_ack_irq(&ch->dma);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 8edba5ea90f0..4a61c90003b5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
struct mvpp2_tai;
/* Definitions */
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
+struct mvpp2_buff_hdr {
+ __le32 next_phys_addr;
+ __le32 next_dma_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_phys_addr_high;
+ u8 next_dma_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ec706d614cac..d39c7639cdba 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3839,6 +3839,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
return ret;
}
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ phys_addr_t phys_addr, phys_addr_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+
+ phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22) {
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+ }
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ phys_addr = phys_addr_next;
+ dma_addr = dma_addr_next;
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
int rx_todo, struct mvpp2_rx_queue *rxq)
@@ -3885,14 +3914,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
MVPP2_RXD_BM_POOL_ID_OFFS;
bm_pool = &port->priv->bm_pools[pool];
- /* In case of an error, release the requested buffer pointer
- * to the Buffer Manager. This request process is controlled
- * by the hardware, and the information about the buffer is
- * comprised by the RX descriptor.
- */
- if (rx_status & MVPP2_RXD_ERR_SUMMARY)
- goto err_drop_frame;
-
if (port->priv->percpu_pools) {
pp = port->priv->page_pool[pool];
dma_dir = page_pool_get_dma_dir(pp);
@@ -3904,6 +3925,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes + MVPP2_MH_SIZE,
dma_dir);
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ /* In case of an error, release the requested buffer pointer
+ * to the Buffer Manager. This request process is controlled
+ * by the hardware, and the information about the buffer is
+ * comprised by the RX descriptor.
+ */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
/* Prefetch header */
prefetch(data);
@@ -3985,7 +4018,10 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+ else
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
rcu_read_unlock();
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index f4962a97a075..9d9a2e438acf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -786,6 +786,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ed4eacef17ce..64adfd24e134 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -681,32 +681,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
void mtk_stats_update_mac(struct mtk_mac *mac)
{
struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = MTK_GDM1_TX_GBCNT;
- u64 stats;
-
- base += hw_stats->reg_offset;
+ struct mtk_eth *eth = mac->hw;
u64_stats_update_begin(&hw_stats->syncp);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+ hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+ } else {
+ unsigned int offs = hw_stats->reg_offset;
+ u64 stats;
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw,
+ MTK_GDM1_RX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ hw_stats->rx_overflow +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ hw_stats->rx_fcs_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ hw_stats->rx_short_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ hw_stats->rx_long_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ }
+
u64_stats_update_end(&hw_stats->syncp);
}
@@ -2423,7 +2444,8 @@ static void mtk_dim_rx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2452,7 +2474,8 @@ static void mtk_dim_tx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2480,6 +2503,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 11331b44ba07..5ef70dd8b49c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -278,8 +278,21 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN 0x1B2C
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT 0x2400
+/* GMA1 counter / stats register */
+#define MTK_GDM1_RX_GBCNT_L 0x2400
+#define MTK_GDM1_RX_GBCNT_H 0x2404
+#define MTK_GDM1_RX_GPCNT 0x2408
+#define MTK_GDM1_RX_OERCNT 0x2410
+#define MTK_GDM1_RX_FERCNT 0x2414
+#define MTK_GDM1_RX_SERCNT 0x2418
+#define MTK_GDM1_RX_LENCNT 0x241c
+#define MTK_GDM1_RX_CERCNT 0x2420
+#define MTK_GDM1_RX_FCCNT 0x2424
+#define MTK_GDM1_TX_SKIPCNT 0x2428
+#define MTK_GDM1_TX_COLCNT 0x242c
+#define MTK_GDM1_TX_GBCNT_L 0x2430
+#define MTK_GDM1_TX_GBCNT_H 0x2434
+#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
/* QDMA descriptor txd4 */
@@ -502,6 +515,13 @@
#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 1434df66fcf2..3616b77caa0a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
-#define MLX4_EEPROM_PAGE_LEN 256
-
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6cfec81ccc3..dc4ac1a2b6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (mlx4_is_mfunc(dev))
disable_unsupported_roce_caps(outbox);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+ dev_cap->map_clock_to_user = field & 0x80;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
dev_cap->reserved_qps = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 8f020f26ebf5..cf64e54eecb0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
+ bool map_clock_to_user;
};
struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c326b434734e..00c84656b2e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
+ dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
if (mlx4_is_slave(dev))
return -EOPNOTSUPP;
+ if (!dev->caps.map_clock_to_user) {
+ mlx4_dbg(dev, "Map clock to user is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
if (!params)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ba6ac31a339d..256a06b3c096 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
return "Unknown Error";
}
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ int ret;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = 0;
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = I2C_ADDR_LOW;
+ cable_info->size = cpu_to_be16(1);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* Mad returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+ cable_info_mad_err_str(ret));
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ *module_id = cable_info->data[0];
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ *i2c_addr = I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < I2C_PAGE_SIZE)
+ return;
+
+ *i2c_addr = I2C_ADDR_HIGH;
+ *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ /* Offsets 0-255 belong to page 0.
+ * Offsets 256-639 belong to pages 01, 02, 03.
+ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+ */
+ if (*offset < I2C_PAGE_SIZE)
+ *page_num = 0;
+ else
+ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+ *i2c_addr = I2C_ADDR_LOW;
+ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
- u16 i2c_addr;
+ u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
+ ret = mlx4_get_module_id(dev, port, &module_id);
+ if (ret)
+ return ret;
+
+ switch (module_id) {
+ case MLX4_MODULE_ID_SFP:
+ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX4_MODULE_ID_QSFP:
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ case MLX4_MODULE_ID_QSFP28:
+ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+ return -EINVAL;
+ }
+
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
*/
size -= offset + size - I2C_PAGE_SIZE;
- i2c_addr = I2C_ADDR_LOW;
-
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
- cable_info->page_num = 0;
+ cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9166cd85013..ceebfc20f65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
mutex_lock(&mlx5_intf_mutex);
+ priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
}
} else {
adev = &priv->adev[i]->adev;
+
+ /* Note that this is not the PCI driver to which
+ * mlx5_core_dev is bound, but an auxiliary driver.
+ *
+ * Here we can race module unload against devlink
+ * reload, but we don't need to take an extra lock because
+ * we are holding the global mlx5_intf_mutex.
+ */
+ if (!adev->dev.driver)
+ continue;
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
adev = &priv->adev[i]->adev;
+ /* Auxiliary driver was unbound manually through sysfs */
+ if (!adev->dev.driver)
+ goto skip_suspend;
+
adrv = to_auxiliary_drv(adev->dev.driver);
if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
continue;
}
+skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
struct mlx5_priv *priv = &dev->priv;
lockdep_assert_held(&mlx5_intf_mutex);
+ if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
+ return 0;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 0dd7615e5931..bc33eaada3b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
struct mlx5e_priv *priv = netdev_priv(dev);
struct devlink_port *port;
+ if (!netif_device_present(dev))
+ return NULL;
port = mlx5e_devlink_get_dl_port(priv);
if (port->registered)
return port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d907c1acd4d5..778e229310a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies
-#include <linux/ptp_classify.h>
#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index ab935cce952b..c96668bd701c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -6,6 +6,7 @@
#include "en.h"
#include "en_stats.h"
+#include <linux/ptp_classify.h>
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
+static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+ struct flow_keys fk;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return false;
+
+ if (fk.basic.n_proto == htons(ETH_P_1588))
+ return true;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return false;
+
+ return (fk.basic.ip_proto == IPPROTO_UDP &&
+ fk.ports.dst == htons(PTP_EV_PORT));
+}
+
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
u8 lag_port, struct mlx5e_ptp **cp);
void mlx5e_ptp_close(struct mlx5e_ptp *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 95f2b26a3ee3..9c076aa20306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
rpriv = priv->ppriv;
fwd_vport_num = rpriv->rep->vport;
lag_dev = netdev_master_upper_dev_get(netdev);
+ if (!lag_dev)
+ return;
netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index be0ee03de721..2e9bee4e5209 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
work);
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
struct neighbour *n = update_work->n;
+ struct mlx5e_encap_entry *e = NULL;
bool neigh_connected, same_dev;
- struct mlx5e_encap_entry *e;
unsigned char ha[ETH_ALEN];
- struct mlx5e_priv *priv;
u8 nud_state, dead;
rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
if (!same_dev)
goto out;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- if (!mlx5e_encap_take(e))
- continue;
+ /* mlx5e_get_next_init_encap() releases previous encap before returning
+ * the next one.
+ */
+ while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
+ mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
- priv = netdev_priv(e->out_dev);
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
- mlx5e_encap_put(priv, e);
- }
out:
rtnl_unlock();
mlx5e_release_neigh_update_work(update_work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 6cdc52d50a48..85eaadc989df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
ASSERT_RTNL();
- /* wait for encap to be fully initialized */
- wait_for_completion(&e->res_ready);
-
mutex_lock(&esw->offloads.encap_tbl_lock);
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
+ if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list);
@@ -626,7 +622,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5_eswitch *esw;
u32 zone_restore_id;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (!tc_skb_ext) {
WARN_ON(1);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 593503bc4d07..490131e06efb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -251,9 +251,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
mlx5e_take_tmp_flow(flow, flow_list, 0);
}
+typedef bool (match_cb)(struct mlx5e_encap_entry *);
+
static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
- struct mlx5e_encap_entry *e)
+mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e,
+ match_cb match)
{
struct mlx5e_encap_entry *next = NULL;
@@ -288,7 +291,7 @@ retry:
/* wait for encap to be fully initialized */
wait_for_completion(&next->res_ready);
/* continue searching if encap entry is not in valid state after completion */
- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ if (!match(next)) {
e = next;
goto retry;
}
@@ -296,6 +299,30 @@ retry:
return next;
}
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
+{
+ return e->flags & MLX5_ENCAP_ENTRY_VALID;
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
+}
+
+static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
+{
+ return e->compl_result >= 0;
+}
+
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
@@ -1505,7 +1532,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3d45341e2216..26f7fab109d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
- if (!priv->ipsec)
- return;
-
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 5cd466ec6492..25403af32859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -356,7 +356,7 @@ err:
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
- int err = 0;
+ int err = -ENOMEM;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8360289813f0..d6513aef5cd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ unsigned long fec_bitmap;
u16 fec_policy = 0;
int mode;
int err;
- if (bitmap_weight((unsigned long *)&fecparam->fec,
- ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
return -EOPNOTSUPP;
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
+ if (new_val && !priv->profile->rx_ptp_support &&
+ priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ netdev_err(priv->netdev,
+ "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0d571a0c76d9..0b75fab41ae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bca832cdc4cb..d26b8ed51195 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -889,10 +889,13 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq)
+ if (rq->icosq) {
mlx5e_trigger_irq(rq->icosq);
- else
+ } else {
+ local_bh_disable();
napi_schedule(rq->cq.napi);
+ local_bh_enable();
+ }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2697,13 +2700,11 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
int err;
old_num_txqs = netdev->real_num_tx_queues;
- old_ntc = netdev->num_tc;
+ old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
- if (priv->channels.params.ptp_rx)
- num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@ -3855,6 +3856,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
+ if (mlx5e_is_uplink_rep(priv)) {
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+ }
+
mutex_unlock(&priv->state_lock);
return features;
@@ -3971,11 +3982,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
return mlx5e_ptp_rx_manage_fs(priv, set);
}
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+ bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+ int err;
+
+ if (!rx_filter)
+ /* Reset CQE compression to Admin default */
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+ if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ return 0;
+
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err)
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+ return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
{
struct mlx5e_params new_params;
+
+ if (ptp_rx == priv->channels.params.ptp_rx)
+ return 0;
+
+ new_params = priv->channels.params;
+ new_params.ptp_rx = ptp_rx;
+ return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
struct hwtstamp_config config;
bool rx_cqe_compress_def;
+ bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3995,13 +4040,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
- new_params = priv->channels.params;
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- new_params.ptp_rx = false;
+ ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -4018,24 +4062,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- new_params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
+ /* ptp_rx is set only when both HW timestamping and CQE
+ * compression are enabled
+ */
+ ptp_rx = rx_cqe_compress_def;
break;
default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
+ err = -ERANGE;
+ goto err_unlock;
}
- if (new_params.ptp_rx == priv->channels.params.ptp_rx)
- goto out;
+ if (!priv->profile->rx_ptp_support)
+ err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+ config.rx_filter != HWTSTAMP_FILTER_NONE);
+ else
+ err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+ if (err)
+ goto err_unlock;
- err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
- &new_params.ptp_rx, true);
- if (err) {
- mutex_unlock(&priv->state_lock);
- return err;
- }
-out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
@@ -4044,6 +4089,9 @@ out:
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4774,22 +4822,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
- netdev->hw_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -5229,6 +5270,11 @@ static void mlx5e_update_features(struct net_device *netdev)
rtnl_unlock();
}
+static void mlx5e_reset_channels(struct net_device *netdev)
+{
+ netdev_reset_tc(netdev);
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
@@ -5283,6 +5329,7 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
@@ -5300,6 +5347,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
+ mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 47a9c49b25fd..d4b0f270b6bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1322,10 +1322,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool vf_tun = false, encap_valid = true;
+ struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
@@ -1371,16 +1371,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = __dev_get_by_index(dev_net(priv->netdev),
- mirred_ifindex);
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto err_out;
+ }
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
+ dev_put(out_dev);
if (err)
goto err_out;
@@ -1393,6 +1399,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
+ if (vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
@@ -2003,11 +2015,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ enum fs_flow_table_type fs_type;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
+ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
match_level = outer_match_level;
if (dissector->used_keys &
@@ -2133,6 +2147,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+ fs_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on CVLAN is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
@@ -3526,8 +3547,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
if (err)
return err;
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
- dev_get_iflink(vlan_dev));
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, action);
@@ -4740,7 +4765,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
- hpe->hp->pair->peer_gone = true;
+ mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
mlx5e_hairpin_put(priv, hpe);
}
@@ -5074,7 +5099,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
chain = mapped_obj.chain;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (WARN_ON(!tc_skb_ext))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 25c091795bcd..17027536efba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e);
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8ba62671f5f1..320fe0cda917 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,7 +32,6 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
-#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
}
#endif
-static bool mlx5e_use_ptpsq(struct sk_buff *skb)
-{
- struct flow_keys fk;
-
- if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
- return false;
-
- if (fk.basic.n_proto == htons(ETH_P_1588))
- return true;
-
- if (fk.basic.n_proto != htons(ETH_P_IP) &&
- fk.basic.n_proto != htons(ETH_P_IPV6))
- return false;
-
- return (fk.basic.ip_proto == IPPROTO_UDP &&
- fk.ports.dst == htons(PTP_EV_PORT));
-}
-
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
}
ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel) &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb))
+ if (unlikely(ptp_channel &&
+ test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+ mlx5e_use_ptpsq(skb)))
return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 77c0ca655975..940333410267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
eqe = next_eqe_sw(eq);
if (!eqe)
- return 0;
+ goto out;
do {
struct mlx5_core_cq *cq;
@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+
+out:
eq_update_ci(eq, 1);
if (cqn != -1)
@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
- eq_update_ci(eq, 1);
out:
+ eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 570f2280823c..97e6cb6f13c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
@@ -1053,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
goto err_vhca_mapping;
}
+ /* External controller host PF has factory programmed MAC.
+ * Read it from the device.
+ */
+ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index db1e74280e57..d18a28a6e9a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
struct mlx5_fs_chains *chains,
int i)
{
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index a81ece94f599..b45954905845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int err;
+ int err, err2;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action then the termination table is the
* same prio as the slow path
*/
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.prio = FDB_TC_OFFLOAD;
ft_attr.max_fte = 1;
+ ft_attr.level = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table (error %d)\n",
- IS_ERR(tt->termtbl));
- return -EOPNOTSUPP;
+ err = PTR_ERR(tt->termtbl);
+ esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
+ return err;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule (error %d)\n",
- IS_ERR(tt->rule));
+ err = PTR_ERR(tt->rule);
+ esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
goto add_flow_err;
}
return 0;
add_flow_err:
- err = mlx5_destroy_flow_table(tt->termtbl);
- if (err)
- esw_warn(dev, "Failed to destroy termination table\n");
+ err2 = mlx5_destroy_flow_table(tt->termtbl);
+ if (err2)
+ esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
- return -EOPNOTSUPP;
+ return err;
}
static struct mlx5_termtbl_handle *
@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
-{
- switch (rt->reformat_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
- return true;
- default:
- return false;
- }
-}
-
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
}
-
- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->pkt_reformat = src->pkt_reformat;
- src->pkt_reformat = NULL;
- }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+ } else {
+ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = NULL;
+ }
+
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
- IS_ERR(tt));
+ esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
goto revert_changes;
/* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index d5d57630015f..106b50e42b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
+ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ return;
+
mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 2c41a6920264..fd6196b5e163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
struct lag_mp *mp = &ldev->lag_mp;
int err;
+ /* always clear mfi, as it might become stale when a route delete event
+ * has been missed
+ */
+ mp->mfi = NULL;
+
if (mp->fib_nb.notifier_call)
return 0;
@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ mp->mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 00ef10a1a9f8..20a4047f2737 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -107,7 +107,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index e96f345e7dae..d50bdb226cef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
bool
mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
bool
mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
static inline struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) { return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index fd8449ff9e17..839a01da110f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/mpfs.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -175,6 +176,7 @@ out:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_add_mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
{
@@ -206,3 +208,4 @@ unlock:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_del_mac);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a7b2c3203a7..4a293542a7aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -84,12 +84,9 @@ struct l2addr_node {
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
-int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
-static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
-static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c114365eb126..0d0f63a27aba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- struct mlx5_profile *prof = dev->profile;
+ struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int err;
@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- profile[prof_sel].log_max_qp,
+ prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
}
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -1161,7 +1161,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_core_set_hca_defaults(dev);
if (err) {
mlx5_core_err(dev, "Failed to set hca defaults\n");
- goto err_sriov;
+ goto err_set_hca;
}
mlx5_vhca_event_start(dev);
@@ -1194,6 +1194,7 @@ err_ec:
mlx5_sf_hw_table_destroy(dev);
err_vhca:
mlx5_vhca_event_stop(dev);
+err_set_hca:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1381,8 +1382,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
struct mlx5_priv *priv = &dev->priv;
int err;
- dev->profile = &profile[profile_idx];
-
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->intf_state_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 50af84e76fb6..174f71ed5280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key |= mlx5_idx_to_mkey(mkey_index);
+ mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
init_waitqueue_head(&mkey->wait);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 1f907df5b3a2..c3373fb1cd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -95,9 +95,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
- int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
- void *hca_cap, *cap;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -116,11 +117,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- hca_cap = kzalloc(sz, GFP_KERNEL);
- if (!hca_cap)
- return -ENOMEM;
+ query_cap = kzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ if (ret)
+ goto out;
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
@@ -130,7 +140,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+out:
kfree(hca_cap);
+ kfree(query_cap);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 441b5453acae..540cf05f6373 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
+ if (!MLX5_CAP_GEN(dev, roce))
+ return;
+
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 6a0c6f965ad1..fa0288afc0dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
sf_index = event->function_id - base_id;
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a8e73c9ed1ea..1be048769309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
switch (hw_state) {
case MLX5_VHCA_STATE_ACTIVE:
case MLX5_VHCA_STATE_IN_USE:
- case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
return DEVLINK_PORT_FN_STATE_ACTIVE;
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
default:
return DEVLINK_PORT_FN_STATE_INACTIVE;
}
@@ -192,14 +192,17 @@ sf_err:
return err;
}
-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+ struct netlink_ext_ack *extack)
{
int err;
if (mlx5_sf_is_active(sf))
return 0;
- if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
- return -EINVAL;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+ NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
+ return -EBUSY;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf,
- enum devlink_port_fn_state state)
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
if (state == mlx5_sf_to_devlink_state(sf->hw_state))
goto out;
if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
- err = mlx5_sf_activate(dev, sf);
+ err = mlx5_sf_activate(dev, sf, extack);
else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
err = mlx5_sf_deactivate(dev, sf);
else
@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
goto out;
}
- err = mlx5_sf_state_set(dev, table, sf, state);
+ err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 1fbcd012bb85..7ccfd40586ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int ret;
ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
- ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+ MLX5_FT_MAX_MULTIPATH_LEVEL);
ft_attr.reformat_en = reformat_req;
ft_attr.decap_en = reformat_req;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 054c2e2b6554..7466f016375c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -694,7 +694,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
return -EINVAL;
- memcpy(padded_data, data, data_sz);
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+ /* Add an alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
/* Remove L2L3 outer headers */
MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -706,32 +710,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
hw_action += DR_STE_ACTION_DOUBLE_SZ;
used_actions++; /* Remove and NOP are a single double action */
- inline_data_sz =
- MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
- /* Add the new header inline + 2 extra bytes */
+ /* Add the new header using inline action 4Byte at a time, the header
+ * is added in reversed order to the beginning of the packet to avoid
+ * incorrect parsing by the HW. Since header is 14B or 18B an extra
+ * two bytes are padded and later removed.
+ */
for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
void *addr_inline;
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_INLINE);
/* The hardware expects here offset to words (2 bytes) */
- MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
- i * 2);
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
/* Copy bytes one by one to avoid endianness problem */
addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
hw_action, inline_data);
- memcpy(addr_inline, data_ptr, inline_data_sz);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
hw_action += DR_STE_ACTION_DOUBLE_SZ;
- data_ptr += inline_data_sz;
used_actions++;
}
- /* Remove 2 extra bytes */
+ /* Remove first 2 extra bytes */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
- MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
/* The hardware expects here size in words (2 bytes) */
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
used_actions++;
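
For illustration only (not part of the patch): a standalone sketch of the offset arithmetic behind the reversed, chunk-at-a-time inline insertion above, assuming a 14-byte L2 header and 4-byte inline_data chunks. Each insert lands at packet offset 0, so the chunk written last ends up first and the two front padding bytes are removed afterwards.

	#include <stdio.h>

	int main(void)
	{
		int data_sz = 14;               /* new L2 header; 18 with VLAN */
		int inline_sz = 4;              /* bytes carried per inline action */
		int pad = data_sz % inline_sz;  /* 2 padding bytes at the front of padded_data */
		int last = (data_sz / inline_sz) * inline_sz;

		/* header bytes are copied to padded_data + pad; data_ptr starts at the
		 * last full chunk and walks backwards
		 */
		for (int i = 0; i < data_sz / inline_sz + 1; i++)
			printf("action %d inserts padded_data[%2d..%2d] at packet offset 0\n",
			       i, last - i * inline_sz, last - i * inline_sz + inline_sz - 1);

		printf("then remove the first %d padding bytes (1 word)\n", pad);
		return 0;
	}
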
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 612b0ac31db2..9737565cd8d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
- (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
- (MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX));
+ return MLX5_CAP_GEN(dev, roce) &&
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+ (MLX5_CAP_GEN(dev, steering_format_version) <=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 01cc00ad8acf..b6931bbe52d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -424,6 +424,15 @@ err_modify_sq:
return err;
}
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
-
/* unset peer SQs */
- if (hp->peer_gone)
- return;
- for (i = 0; i < hp->num_channels; i++)
- mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_RST, 0, 0);
+ if (!hp->peer_gone)
+ mlx5_hairpin_unpair_peer_sq(hp);
}
struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
mlx5_hairpin_destroy_queues(hp);
kfree(hp);
}
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ mlx5_hairpin_unpair_peer_sq(hp);
+
+ /* destroy peer SQ */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+ hp->peer_gone = true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 457ad42eaa2a..4c1440a95ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
void *in;
int err;
- if (!vport)
- return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index dfea14399607..85f0ce285146 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -693,7 +693,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0, 0);
+ NULL, 0,
+ module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
return err;
@@ -815,7 +816,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0, 0);
+ NULL, 0,
+ gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 900b4bf5bb5b..2bc5a9003c6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 04672eb5c7f3..9958d503bf0e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
+ u32 parent;
if (band < mlxsw_sp_qdisc->num_classes &&
mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
@@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
if (old_qdisc)
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
- mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+ parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+ parent);
if (!WARN_ON(!mlxsw_sp_qdisc))
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
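
For illustration only (not part of the patch): find_class() now receives a full classid rather than a bare band index. A tiny standalone sketch of how that classid is built, using the TC_H_MAKE() definition from the uapi header linux/pkt_sched.h (qdisc handle in the upper 16 bits, class minor of band + 1 in the lower 16 bits, since minor 0 names the qdisc itself).

	#include <stdint.h>
	#include <stdio.h>

	#define TC_H_MAJ_MASK 0xFFFF0000U
	#define TC_H_MIN_MASK 0x0000FFFFU
	#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

	int main(void)
	{
		uint32_t handle = 0x10000;	/* ETS qdisc "1:" */

		for (uint32_t band = 0; band < 3; band++)
			printf("band %u -> classid %x:%x\n", band,
			       TC_H_MAKE(handle, band + 1) >> 16,
			       TC_H_MAKE(handle, band + 1) & TC_H_MIN_MASK);
		return 0;
	}
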
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 3658c4ae3c37..ee921a99e439 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* Microchip ENCX24J600 ethernet driver
*
* Copyright (C) 2015 Gridpoint
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index f604a260ede7..fac61a8fbd02 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* encx24j600_hw.h: Register definitions
*
*/
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0c4283319d7f..adfb9781799e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
int ocelot_port_flush(struct ocelot *ocelot, int port)
{
+ unsigned int pause_ena;
int err, val;
/* Disable dequeuing from the egress queues */
@@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
QSYS_PORT_MODE, port);
/* Disable flow control */
+ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
/* Disable priority flow control */
@@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+ /* Re-enable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
return err;
}
EXPORT_SYMBOL(ocelot_port_flush);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c84c8bf2bc20..fc99ad8e4a38 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
+ status = -EINVAL;
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 5f8b0bb3af6e..202973a82712 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
+ depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
select NET_DEVLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 7e6bac85495d..344ea1143454 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1602,6 +1602,8 @@ err_out_free_netdev:
free_netdev(netdev);
err_out_free_res:
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 17d5b649eb36..e81dd34a3cac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct qed_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 214e347097a7..2376b2729633 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
value = readl(&port_regs->CommonRegs.semaphoreReg);
if ((value & (sem_mask >> 16)) == sem_bits)
return 0;
- ssleep(1);
+ mdelay(1000);
} while (--seconds);
return -1;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index d8a3ecaed3fc..d8f0863b3934 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
- break;
+ goto error;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
cnt++;
}
if (cnt != i) {
+error:
dev_err(&adapter->pdev->dev,
"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
if (mode != QLCNIC_ILB_MODE)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 96b947fde646..3beafc60747e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
kfree(ahw);
err_out_free_res:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 41fbd2ceeede..ab1e0fcccabb 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
- struct rmnet_vnd_stats total_stats;
+ struct rmnet_vnd_stats total_stats = { };
struct rmnet_pcpu_stats *pcpu_ptr;
+ struct rmnet_vnd_stats snapshot;
unsigned int cpu, start;
- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
- total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
- total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
- total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
- total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+ snapshot = pcpu_ptr->stats; /* struct assignment */
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
- total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+ total_stats.rx_pkts += snapshot.rx_pkts;
+ total_stats.rx_bytes += snapshot.rx_bytes;
+ total_stats.tx_pkts += snapshot.tx_pkts;
+ total_stats.tx_bytes += snapshot.tx_bytes;
+ total_stats.tx_drops += snapshot.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
}
return 0;
-} \ No newline at end of file
+}
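
For illustration only (not part of the patch): a condensed, hedged sketch of the snapshot-then-accumulate pattern the rmnet rewrite follows. The per-CPU counters are copied inside the u64_stats retry loop so the snapshot is internally consistent, and only added to the totals outside the loop so a retry never double-counts; field names here ("foo"/"bar") are illustrative, not the rmnet ones.

	struct pcpu_stats { struct u64_stats_sync syncp; u64 foo, bar; };

	static void sum_stats(struct pcpu_stats __percpu *pstats, u64 *foo, u64 *bar)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			const struct pcpu_stats *p = per_cpu_ptr(pstats, cpu);
			u64 sfoo, sbar;
			unsigned int start;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				sfoo = p->foo;		/* snapshot inside the loop */
				sbar = p->bar;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));

			*foo += sfoo;			/* accumulate outside the loop */
			*bar += sbar;
		}
	}
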
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 2c89cde7da1e..2ee72dc431cd 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1671,7 +1671,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
break;
}
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c5b154868c1f..713d3629b4c1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *sh_eth_gstrings_stats,
+ memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index d1e908846f5d..22fbb0ae77fb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx->pci_dev->irq);
goto fail1;
}
+ efx->irqs_hooked = true;
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 527077c98ebc..fc3b0acc8f99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -30,7 +30,7 @@ struct sunxi_priv_data {
static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- int ret;
+ int ret = 0;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
} else {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
ret = clk_prepare(gmac->tx_clk);
- if (ret)
- return ret;
+ if (ret && gmac->regulator)
+ regulator_disable(gmac->regulator);
}
- return 0;
+ return ret;
}
static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b70d44ac0990..3c73453725f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -76,10 +76,10 @@ enum power_event {
#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
/* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
- (reg * 8))
-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
- (reg * 8))
+#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+ 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+ 0x00000044 + (reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
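
For illustration only (not part of the patch): a standalone sketch of the register offsets produced by the old and new GMAC_ADDR_HIGH() definitions. MAC address entries 0-15 live at 0x40 + reg * 8, while entries 16 and up restart at 0x800, which is what the old "base + reg * 8" formula got wrong for reg > 15.

	#include <stdio.h>

	#define OLD_HIGH(reg) ((((reg) > 15) ? 0x00000800 : 0x00000040) + ((reg) * 8))
	#define NEW_HIGH(reg) (((reg) > 15) ? 0x00000800 + ((reg) - 16) * 8 : \
					      0x00000040 + ((reg) * 8))

	int main(void)
	{
		int regs[] = { 0, 15, 16, 17 };

		for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
			printf("reg %2d: old 0x%04x new 0x%04x\n",
			       regs[i], OLD_HIGH(regs[i]), NEW_HIGH(regs[i]));
		return 0;
	}
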
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 345b4c6d1fd4..c87202cbd3d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1196,7 +1196,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
*/
static int stmmac_init_phy(struct net_device *dev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct stmmac_priv *priv = netdev_priv(dev);
struct device_node *node;
int ret;
@@ -1222,8 +1221,12 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_connect_phy(priv->phylink, phydev);
}
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
return ret;
}
@@ -1237,8 +1240,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.pcs_poll = true;
- priv->phylink_config.ovr_an_inband =
- priv->plat->mdio_bus_data->xpcs_an_inband;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ priv->plat->mdio_bus_data->xpcs_an_inband;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -5888,12 +5892,21 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
ret = eth_mac_addr(ndev, addr);
if (ret)
- return ret;
+ goto set_mac_error;
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+set_mac_error:
+ pm_runtime_put(priv->device);
+
return ret;
}
@@ -6188,12 +6201,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6219,6 +6226,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -7036,7 +7049,6 @@ error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
- stmmac_bus_clks_config(priv, false);
bitmap_free(priv->af_xdp_zc_qps);
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1e17a23d9118..a696ada013eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -622,6 +622,8 @@ error_pclk_get:
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 707ccdd03b19..74e748662ec0 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
for (i = 0; i < prop_len; i++) {
- err = niu_pci_eeprom_read(np, off + i);
- if (err >= 0)
- *prop_buf = err;
- ++prop_buf;
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err < 0)
+ return err;
+ *prop_buf++ = err;
}
}
@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+static int niu_pci_vpd_fetch(struct niu *np, u32 start)
{
u32 offset;
int err;
err = niu_pci_eeprom_read16_swp(np, start + 1);
if (err < 0)
- return;
+ return err;
offset = err + 3;
@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
u32 end;
err = niu_pci_eeprom_read(np, here);
+ if (err < 0)
+ return err;
if (err != 0x90)
- return;
+ return -EINVAL;
err = niu_pci_eeprom_read16_swp(np, here + 1);
if (err < 0)
- return;
+ return err;
here = start + offset + 3;
end = start + offset + err;
@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
offset += err;
err = niu_pci_vpd_scan_props(np, here, end);
- if (err < 0 || err == 1)
- return;
+ if (err < 0)
+ return err;
+ if (err == 1)
+ return -EINVAL;
}
+ return 0;
}
/* ESPC_PIO_EN_ENABLE must be set */
@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
offset = niu_pci_vpd_offset(np);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"%s() VPD offset [%08x]\n", __func__, offset);
- if (offset)
- niu_pci_vpd_fetch(np, offset);
+ if (offset) {
+ err = niu_pci_vpd_fetch(np, offset);
+ if (err < 0)
+ return err;
+ }
nw64(ESPC_PIO_EN, 0);
if (np->flags & NIU_FLAGS_VPD_VALID) {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9030e619e543..97942b0e3897 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
KNAV_QUEUE_SHARED);
if (IS_ERR(tx_pipe->dma_queue)) {
- dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
- name, ret);
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+ name, tx_pipe->dma_queue);
ret = PTR_ERR(tx_pipe->dma_queue);
goto err;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a1f5f07f4ca9..9a13953ea70f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
+ /* Make sure that the other fields are read after bd is
+ * released by dma
+ */
+ rmb();
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
skb = (struct sk_buff *)ptr_from_txbd(cur_p);
if (skb)
dev_consume_skb_irq(skb);
- cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
+ /* app0 must be visible last, as it is used to flag
+ * availability of the bd
+ */
+ smp_mb();
+ cur_p->app0 = 0;
+
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
if (cur_p->app0)
return NETDEV_TX_BUSY;
+ /* Make sure to read next bd app0 after this one */
+ rmb();
+
tail++;
if (tail >= lp->tx_bd_num)
tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
smp_mb();
/* Space might have just been freed - check again */
- if (temac_check_tx_bd_space(lp, num_frag))
+ if (temac_check_tx_bd_space(lp, num_frag + 1))
return NETDEV_TX_BUSY;
netif_wake_queue(ndev);
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
cur_p->phys = cpu_to_be32(skb_dma_addr);
- ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+ /* Mark last fragment with skb address, so it can be consumed
+ * in temac_start_xmit_done()
+ */
+ ptr_to_txbd((void *)skb, cur_p);
+
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -926,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
+ netif_stop_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}
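
For illustration only (not part of the patch): a userspace analogue of the ordering the temac barriers enforce between the descriptor's completion/ownership flag (app0) and its other fields, written with C11 acquire/release instead of rmb()/smp_mb(). This is a sketch of the idea, not the driver's actual DMA protocol.

	#include <stdatomic.h>
	#include <stdint.h>

	struct bd {
		_Atomic uint32_t app0;	/* completion/ownership flag, touched last */
		uint32_t len;		/* payload fields, not read before app0 */
	};

	static int bd_completed(struct bd *p, uint32_t *len)
	{
		/* acquire pairs with the release below: seeing the flag set
		 * guarantees the earlier field writes are visible, like the
		 * rmb() after the STS_CTRL_APP0_CMPLT check in the patch
		 */
		if (!(atomic_load_explicit(&p->app0, memory_order_acquire) & 1))
			return 0;
		*len = p->len;
		return 1;
	}

	static void bd_recycle(struct bd *p, uint32_t len)
	{
		p->len = len;
		/* release: publish the fields before re-arming the flag,
		 * mirroring the smp_mb() before "cur_p->app0 = 0"
		 */
		atomic_store_explicit(&p->app0, 0, memory_order_release);
	}
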
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 65154224d5b8..7685a1721597 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
ax->tty = NULL;
unregister_netdev(ax->dev);
+ free_netdev(ax->dev);
}
/* Perform I/O control on an active ax25 channel. */
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index b9be530b285f..ff83e00b77af 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -8,8 +8,8 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/irq.h>
@@ -1388,7 +1388,7 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
static struct spi_driver mrf24j40_driver = {
.driver = {
- .of_match_table = of_match_ptr(mrf24j40_of_match),
+ .of_match_table = mrf24j40_of_match,
.name = "mrf24j40",
},
.id_table = mrf24j40_ids,
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index e7ff376cb5b7..744406832a77 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -58,6 +58,7 @@ enum ipa_flag {
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
* @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem_count: Number of entries in the mem array
* @mem: Array of IPA-local memory region descriptors
* @imem_iova: I/O virtual address of IPA region in IMEM
* @imem_size: Size of IMEM region
@@ -103,6 +104,7 @@ struct ipa {
void *mem_virt;
u32 mem_offset;
u32 mem_size;
+ u32 mem_count;
const struct ipa_mem *mem;
unsigned long imem_iova;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index c5c3b1b7e67d..1624125e7459 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -180,7 +180,7 @@ int ipa_mem_config(struct ipa *ipa)
* for the region, write "canary" values in the space prior to
* the region's base address.
*/
- for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
+ for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
const struct ipa_mem *mem = &ipa->mem[mem_id];
u16 canary_count;
__le32 *canary;
@@ -487,6 +487,7 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_size = resource_size(res);
/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
+ ipa->mem_count = mem_data->local_count;
ipa->mem = mem_data->local;
ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 8ce99c4888e1..e096e68ac667 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return 0;
fail_register:
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index cb1761693b69..822d2cdd2f35 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
continue;
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
pci_release_regions(pdev);
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index 0d8293a47a56..b806f2f8f859 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
return 0;
}
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
const struct mhi_net_proto *proto = mhi_netdev->proto;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9bd9a5c0b1db..6bbc81ad295f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
if (err < 0)
return err;
usleep_range(10, 20);
- /* After reset FORCE_LINK_GOOD bit is set. Although the
- * default value should be unset. Disable FORCE_LINK_GOOD
- * for the phy to work properly.
- */
return phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index dadf75ff3ab9..6045ad3def12 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -607,7 +607,8 @@ void mdiobus_unregister(struct mii_bus *bus)
struct mdio_device *mdiodev;
int i;
- BUG_ON(bus->state != MDIOBUS_REGISTERED);
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
bus->state = MDIOBUS_UNREGISTERED;
for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 0eeec80bec31..359ea0d10e59 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -26,7 +26,7 @@
* for transport over USB using a simpler USB device model than the
* previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
*
- * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
+ * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
*
* This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
* 2.6.27 and 2.6.30rc2 kernel.
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+ dev_kfree_skb_any(skb);
if (!skb2)
return NULL;
- dev_kfree_skb_any(skb);
skb = skb2;
done:
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b04055fd1b79..df0d1837e4ed 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1880,7 +1880,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
- | FLAG_LINK_INTR,
+ | FLAG_LINK_INTR | FLAG_ETHER,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 3ef4b2841402..5c779cc0ea11 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1689,7 +1689,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
spin_unlock_irqrestore(&serial->serial_lock, flags);
return usb_control_msg(serial->parent->usb,
- usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
+ usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
0x21, val, if_num, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -2436,7 +2436,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
if (hso_dev->usb_gone)
rv = 0;
else
- rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
+ rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
mutex_unlock(&hso_dev->mutex);
@@ -2618,32 +2618,31 @@ static struct hso_device *hso_create_bulk_serial_device(
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+ if (!serial->tiocmget)
+ goto exit;
serial->tiocmget->serial_state_notification
= kzalloc(sizeof(struct hso_serial_state_notification),
GFP_KERNEL);
- /* it isn't going to break our heart if serial->tiocmget
- * allocation fails don't bother checking this.
- */
- if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
- tiocmget = serial->tiocmget;
- tiocmget->endp = hso_get_ep(interface,
- USB_ENDPOINT_XFER_INT,
- USB_DIR_IN);
- if (!tiocmget->endp) {
- dev_err(&interface->dev, "Failed to find INT IN ep\n");
- goto exit;
- }
-
- tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (tiocmget->urb) {
- mutex_init(&tiocmget->mutex);
- init_waitqueue_head(&tiocmget->waitq);
- } else
- hso_free_tiomget(serial);
+ if (!serial->tiocmget->serial_state_notification)
+ goto exit;
+ tiocmget = serial->tiocmget;
+ tiocmget->endp = hso_get_ep(interface,
+ USB_ENDPOINT_XFER_INT,
+ USB_DIR_IN);
+ if (!tiocmget->endp) {
+ dev_err(&interface->dev, "Failed to find INT IN ep\n");
+ goto exit;
}
- }
- else
+
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tiocmget->urb)
+ goto exit;
+
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+ } else {
num_urbs = 1;
+ }
if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
BULK_URB_TX_SIZE))
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6acc5e904518..02bce40a67e5 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1645,6 +1645,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = lan78xx_get_eee,
.set_eee = lan78xx_set_eee,
.get_pauseparam = lan78xx_get_pause,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6700f1970b24..bc55ec739af9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
skb->protocol = htons(ETH_P_MAP);
- return (netif_rx(skb) == NET_RX_SUCCESS);
+ return 1;
}
switch (skb->data[0] & 0xf0) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 136ea06540ff..e25bfb7021ed 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8107,6 +8107,37 @@ static void r8156b_init(struct r8152 *tp)
tp->coalesce = 15000; /* 15 us */
}
+static bool rtl_check_vendor_ok(struct usb_interface *intf)
+{
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *in, *out, *intr;
+
+ if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
+ dev_err(&intf->dev, "Expected endpoints are not found\n");
+ return false;
+ }
+
+ /* Check Rx endpoint address */
+ if (usb_endpoint_num(in) != 1) {
+ dev_err(&intf->dev, "Invalid Rx endpoint address\n");
+ return false;
+ }
+
+ /* Check Tx endpoint address */
+ if (usb_endpoint_num(out) != 2) {
+ dev_err(&intf->dev, "Invalid Tx endpoint address\n");
+ return false;
+ }
+
+ /* Check interrupt endpoint address */
+ if (usb_endpoint_num(intr) != 3) {
+ dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
+ return false;
+ }
+
+ return true;
+}
+
static bool rtl_vendor_mode(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
@@ -8115,12 +8146,15 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
int i, num_configs;
if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
- return true;
+ return rtl_check_vendor_ok(intf);
/* The vendor mode is not always config #1, so to find it out. */
udev = interface_to_usbdev(intf);
c = udev->config;
num_configs = udev->descriptor.bNumConfigurations;
+ if (num_configs < 2)
+ return false;
+
for (i = 0; i < num_configs; (i++, c++)) {
struct usb_interface_descriptor *desc = NULL;
@@ -8135,7 +8169,8 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
}
}
- WARN_ON_ONCE(i == num_configs);
+ if (i == num_configs)
+ dev_err(&intf->dev, "Unexpected Device\n");
return false;
}
@@ -8643,7 +8678,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
break;
}
}
@@ -9381,9 +9416,6 @@ static int rtl8152_probe(struct usb_interface *intf,
if (!rtl_vendor_mode(intf))
return -ENODEV;
- if (intf->cur_altsetting->desc.bNumEndpoints < 3)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f8cdabb9ef5a..13141dbfa3a8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
- return ret;
+ goto free_pdata;
}
smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_reset(dev);
if (ret < 0) {
netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
- return ret;
+ goto cancel_work;
}
dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1502,6 +1502,13 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
+
+cancel_work:
+ cancel_work_sync(&pdata->set_multicast);
+free_pdata:
+ kfree(pdata);
+ dev->data[0] = 0;
+ return ret;
}
static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1511,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
- pdata = NULL;
dev->data[0] = 0;
}
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9b6a4a875c55..78a01c71a17c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -401,18 +401,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
/* If headroom is not 0, there is an offset between the beginning of the
* data and the allocated space, otherwise the data and the allocated
* space are aligned.
+ *
+ * Buffers with headroom use PAGE_SIZE as alloc size, see
+ * add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
- if (headroom) {
- /* Buffers with headroom use PAGE_SIZE as alloc size,
- * see add_recvbuf_mergeable() + get_mergeable_buf_len()
- */
- truesize = PAGE_SIZE;
- tailroom = truesize - len - offset;
- buf = page_address(page);
- } else {
- tailroom = truesize - len;
- buf = p;
- }
+ truesize = headroom ? PAGE_SIZE : truesize;
+ tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
+ buf = p - headroom;
len -= hdr_len;
offset += hdr_padded_len;
@@ -958,7 +953,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
- metasize, headroom);
+ metasize,
+ VIRTIO_XDP_HEADROOM);
return head_skb;
}
break;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 503e2fd7ce51..28a6c4cfe9b8 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
dev->flags = IFF_MASTER | IFF_NOARP;
- /* MTU is irrelevant for VRF device; set to 64k similar to lo */
- dev->mtu = 64 * 1024;
-
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
netdev_lockdep_set_classes(dev);
@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
* which breaks networking.
*/
dev->min_mtu = IPV6_MIN_MTU;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->max_mtu = IP6_MAX_MTU;
+ dev->mtu = dev->max_mtu;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
index fc52b2cb500b..dbe1f8514efc 100644
--- a/drivers/net/wireguard/Makefile
+++ b/drivers/net/wireguard/Makefile
@@ -1,5 +1,4 @@
-ccflags-y := -O3
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
wireguard-y := main.o
wireguard-y += noise.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 3725e9cd85f4..b7197e80f226 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,6 +6,8 @@
#include "allowedips.h"
#include "peer.h"
+static struct kmem_cache *node_cache;
+
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
{
if (bits == 32) {
@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
node->bitlen = bits;
memcpy(node->bits, src, bits / 8U);
}
-#define CHOOSE_NODE(parent, key) \
- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static inline u8 choose(struct allowedips_node *node, const u8 *key)
+{
+ return (key[node->bit_at_a] >> node->bit_at_b) & 1;
+}
static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
}
}
+static void node_free_rcu(struct rcu_head *rcu)
+{
+ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
+}
+
static void root_free_rcu(struct rcu_head *rcu)
{
struct allowedips_node *node, *stack[128] = {
@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
while (len > 0 && (node = stack[--len])) {
push_rcu(stack, node->bit[0], &len);
push_rcu(stack, node->bit[1], &len);
- kfree(node);
+ kmem_cache_free(node_cache, node);
}
}
@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
}
}
-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
- struct wg_peer *peer, struct mutex *lock)
-{
-#define REF(p) rcu_access_pointer(p)
-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
-#define PUSH(p) ({ \
- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
- stack[len++] = p; \
- })
-
- struct allowedips_node __rcu **stack[128], **nptr;
- struct allowedips_node *node, *prev;
- unsigned int len;
-
- if (unlikely(!peer || !REF(*top)))
- return;
-
- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
- nptr = stack[len - 1];
- node = DEREF(nptr);
- if (!node) {
- --len;
- continue;
- }
- if (!prev || REF(prev->bit[0]) == node ||
- REF(prev->bit[1]) == node) {
- if (REF(node->bit[0]))
- PUSH(&node->bit[0]);
- else if (REF(node->bit[1]))
- PUSH(&node->bit[1]);
- } else if (REF(node->bit[0]) == prev) {
- if (REF(node->bit[1]))
- PUSH(&node->bit[1]);
- } else {
- if (rcu_dereference_protected(node->peer,
- lockdep_is_held(lock)) == peer) {
- RCU_INIT_POINTER(node->peer, NULL);
- list_del_init(&node->peer_list);
- if (!node->bit[0] || !node->bit[1]) {
- rcu_assign_pointer(*nptr, DEREF(
- &node->bit[!REF(node->bit[0])]));
- kfree_rcu(node, rcu);
- node = DEREF(nptr);
- }
- }
- --len;
- }
- }
-
-#undef REF
-#undef DEREF
-#undef PUSH
-}
-
static unsigned int fls128(u64 a, u64 b)
{
return a ? fls64(a) + 64U : fls64(b);
@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
found = node;
if (node->cidr == bits)
break;
- node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+ node = rcu_dereference_bh(node->bit[choose(node, key)]);
}
return found;
}
@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
u8 cidr, u8 bits, struct allowedips_node **rnode,
struct mutex *lock)
{
- struct allowedips_node *node = rcu_dereference_protected(trie,
- lockdep_is_held(lock));
+ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
struct allowedips_node *parent = NULL;
bool exact = false;
@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
exact = true;
break;
}
- node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
- lockdep_is_held(lock));
+ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
}
*rnode = parent;
return exact;
}
+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+{
+ node->parent_bit_packed = (unsigned long)parent | bit;
+ rcu_assign_pointer(*parent, node);
+}
+
+static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
+{
+ u8 bit = choose(parent, node->bits);
+ connect_node(&parent->bit[bit], bit, node);
+}
+
static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
u8 cidr, struct wg_peer *peer, struct mutex *lock)
{
@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return -EINVAL;
if (!rcu_access_pointer(*trie)) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
RCU_INIT_POINTER(node->peer, peer);
list_add_tail(&node->peer_list, &peer->allowedips_list);
copy_and_assign_cidr(node, key, cidr, bits);
- rcu_assign_pointer(*trie, node);
+ connect_node(trie, 2, node);
return 0;
}
if (node_placement(*trie, key, cidr, bits, &node, lock)) {
@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return 0;
}
- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!newnode))
return -ENOMEM;
RCU_INIT_POINTER(newnode->peer, peer);
@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
if (!node) {
down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
} else {
- down = rcu_dereference_protected(CHOOSE_NODE(node, key),
- lockdep_is_held(lock));
+ const u8 bit = choose(node, key);
+ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
if (!down) {
- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+ connect_node(&node->bit[bit], bit, newnode);
return 0;
}
}
@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
parent = node;
if (newnode->cidr == cidr) {
- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+ choose_and_connect_node(newnode, down);
if (!parent)
- rcu_assign_pointer(*trie, newnode);
+ connect_node(trie, 2, newnode);
else
- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
- newnode);
- } else {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- list_del(&newnode->peer_list);
- kfree(newnode);
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->peer_list);
- copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+ choose_and_connect_node(parent, newnode);
+ return 0;
+ }
- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
- if (!parent)
- rcu_assign_pointer(*trie, node);
- else
- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
- node);
+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ if (unlikely(!node)) {
+ list_del(&newnode->peer_list);
+ kmem_cache_free(node_cache, newnode);
+ return -ENOMEM;
}
+ INIT_LIST_HEAD(&node->peer_list);
+ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+ choose_and_connect_node(node, down);
+ choose_and_connect_node(node, newnode);
+ if (!parent)
+ connect_node(trie, 2, node);
+ else
+ choose_and_connect_node(parent, node);
return 0;
}
@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
+ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
+ bool free_parent;
+
+ if (list_empty(&peer->allowedips_list))
+ return;
++table->seq;
- walk_remove_by_peer(&table->root4, peer, lock);
- walk_remove_by_peer(&table->root6, peer, lock);
+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
+ list_del_init(&node->peer_list);
+ RCU_INIT_POINTER(node->peer, NULL);
+ if (node->bit[0] && node->bit[1])
+ continue;
+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+ lockdep_is_held(lock));
+ if (child)
+ child->parent_bit_packed = node->parent_bit_packed;
+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+ *parent_bit = child;
+ parent = (void *)parent_bit -
+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+ free_parent = !rcu_access_pointer(node->bit[0]) &&
+ !rcu_access_pointer(node->bit[1]) &&
+ (node->parent_bit_packed & 3) <= 1 &&
+ !rcu_access_pointer(parent->peer);
+ if (free_parent)
+ child = rcu_dereference_protected(
+ parent->bit[!(node->parent_bit_packed & 1)],
+ lockdep_is_held(lock));
+ call_rcu(&node->rcu, node_free_rcu);
+ if (!free_parent)
+ continue;
+ if (child)
+ child->parent_bit_packed = parent->parent_bit_packed;
+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+ call_rcu(&parent->rcu, node_free_rcu);
+ }
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
return NULL;
}
+int __init wg_allowedips_slab_init(void)
+{
+ node_cache = KMEM_CACHE(allowedips_node, 0);
+ return node_cache ? 0 : -ENOMEM;
+}
+
+void wg_allowedips_slab_uninit(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(node_cache);
+}
+
#include "selftest/allowedips.c"
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
index e5c83cafcef4..2346c797eb4d 100644
--- a/drivers/net/wireguard/allowedips.h
+++ b/drivers/net/wireguard/allowedips.h
@@ -15,14 +15,11 @@ struct wg_peer;
struct allowedips_node {
struct wg_peer __rcu *peer;
struct allowedips_node __rcu *bit[2];
- /* While it may seem scandalous that we waste space for v4,
- * we're alloc'ing to the nearest power of 2 anyway, so this
- * doesn't actually make a difference.
- */
- u8 bits[16] __aligned(__alignof(u64));
u8 cidr, bit_at_a, bit_at_b, bitlen;
+ u8 bits[16] __aligned(__alignof(u64));
- /* Keep rarely used list at bottom to be beyond cache line. */
+ /* Keep rarely used members at bottom to be beyond cache line. */
+ unsigned long parent_bit_packed;
union {
struct list_head peer_list;
struct rcu_head rcu;
@@ -33,7 +30,7 @@ struct allowedips {
struct allowedips_node __rcu *root4;
struct allowedips_node __rcu *root6;
u64 seq;
-};
+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
void wg_allowedips_init(struct allowedips *table);
void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
bool wg_allowedips_selftest(void);
#endif
+int wg_allowedips_slab_init(void);
+void wg_allowedips_slab_uninit(void);
+
#endif /* _WG_ALLOWEDIPS_H */
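Editor's note: the new parent_bit_packed field (and the __aligned(4) comment above) stores a parent pointer and a 2-bit slot index in a single word, which only works when the pointed-to objects are at least 4-byte aligned so the low bits are guaranteed to be zero. A standalone, hypothetical illustration of that packing trick, not the WireGuard implementation itself:

/* Hypothetical demo of packing a small tag into a pointer's low bits. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	int value;
} __attribute__((aligned(4)));	/* low 2 bits of every &node are zero */

static uintptr_t pack(struct node *ptr, unsigned int tag)
{
	assert(((uintptr_t)ptr & 3) == 0 && tag < 4);
	return (uintptr_t)ptr | tag;
}

static struct node *unpack_ptr(uintptr_t packed)
{
	return (struct node *)(packed & ~(uintptr_t)3);
}

static unsigned int unpack_tag(uintptr_t packed)
{
	return (unsigned int)(packed & 3);
}

int main(void)
{
	struct node n = { .value = 42 };
	uintptr_t packed = pack(&n, 2);

	printf("value=%d tag=%u\n", unpack_ptr(packed)->value, unpack_tag(packed));
	return 0;
}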
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 7a7d5f1a80fc..75dbe77b0b4b 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -21,13 +21,22 @@ static int __init mod_init(void)
{
int ret;
+ ret = wg_allowedips_slab_init();
+ if (ret < 0)
+ goto err_allowedips;
+
#ifdef DEBUG
+ ret = -ENOTRECOVERABLE;
if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
!wg_ratelimiter_selftest())
- return -ENOTRECOVERABLE;
+ goto err_peer;
#endif
wg_noise_init();
+ ret = wg_peer_init();
+ if (ret < 0)
+ goto err_peer;
+
ret = wg_device_init();
if (ret < 0)
goto err_device;
@@ -44,6 +53,10 @@ static int __init mod_init(void)
err_netlink:
wg_device_uninit();
err_device:
+ wg_peer_uninit();
+err_peer:
+ wg_allowedips_slab_uninit();
+err_allowedips:
return ret;
}
@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
+ wg_peer_uninit();
+ wg_allowedips_slab_uninit();
}
module_init(mod_init);
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index cd5cb0292cb6..1acd00ab2fbc 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
+static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);
struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
return ERR_PTR(ret);
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
if (unlikely(!peer))
return ERR_PTR(ret);
- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
goto err;
peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
return peer;
err:
- kfree(peer);
+ kmem_cache_free(peer_cache, peer);
return ERR_PTR(ret);
}
@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
/* Mark as dead, so that we don't allow jumping contexts after. */
WRITE_ONCE(peer->is_dead, true);
- /* The caller must now synchronize_rcu() for this to take effect. */
+ /* The caller must now synchronize_net() for this to take effect. */
}
static void peer_remove_after_dead(struct wg_peer *peer)
@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
lockdep_assert_held(&peer->device->device_update_lock);
peer_make_dead(peer);
- synchronize_rcu();
+ synchronize_net();
peer_remove_after_dead(peer);
}
@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
peer_make_dead(peer);
list_add_tail(&peer->peer_list, &dead_peers);
}
- synchronize_rcu();
+ synchronize_net();
list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
peer_remove_after_dead(peer);
}
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
/* The final zeroing takes care of clearing any remaining handshake key
* material and other potentially sensitive information.
*/
- kfree_sensitive(peer);
+ memzero_explicit(peer, sizeof(*peer));
+ kmem_cache_free(peer_cache, peer);
}
static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
return;
kref_put(&peer->refcount, kref_release);
}
+
+int __init wg_peer_init(void)
+{
+ peer_cache = KMEM_CACHE(wg_peer, 0);
+ return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+ kmem_cache_destroy(peer_cache);
+}
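Editor's note: because wg_peer objects now come from a dedicated kmem_cache, kfree_sensitive() (which pairs only with kmalloc allocations) can no longer be used, so the key material is scrubbed explicitly before the object is returned to the cache. A short kernel-style sketch of that release pattern, assuming the same names as in the hunk above:

/* Sketch of the free path: scrub secrets first, then recycle the object.
 * memzero_explicit() is used instead of memset() so the compiler cannot
 * eliminate the "dead" store to memory that is about to be freed.
 */
static void peer_release(struct wg_peer *peer)
{
	memzero_explicit(peer, sizeof(*peer));	/* wipe handshake/key material */
	kmem_cache_free(peer_cache, peer);	/* return it to the slab cache */
}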
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
index 8d53b687a1d1..76e4d3128ad4 100644
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
void wg_peer_remove(struct wg_peer *peer);
void wg_peer_remove_all(struct wg_device *wg);
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
#endif /* _WG_PEER_H */
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 846db14cb046..e173204ae7d7 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -19,32 +19,22 @@
#include <linux/siphash.h>
-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
- u8 cidr)
-{
- swap_endian(dst, src, bits);
- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
- if (cidr)
- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
-}
-
static __init void print_node(struct allowedips_node *node, u8 bits)
{
char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
- char *fmt_declaration = KERN_DEBUG
- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ u8 ip1[16], ip2[16], cidr1, cidr2;
char *style = "dotted";
- u8 ip1[16], ip2[16];
u32 color = 0;
+ if (node == NULL)
+ return;
if (bits == 32) {
fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
- fmt_declaration = KERN_DEBUG
- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
} else if (bits == 128) {
fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
- fmt_declaration = KERN_DEBUG
- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
}
if (node->peer) {
hsiphash_key_t key = { { 0 } };
@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
hsiphash_1u32(0xabad1dea, &key) % 200;
style = "bold";
}
- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
- printk(fmt_declaration, ip1, node->cidr, style, color);
+ wg_allowedips_read_node(node, ip1, &cidr1);
+ printk(fmt_declaration, ip1, cidr1, style, color);
if (node->bit[0]) {
- swap_endian_and_apply_cidr(ip2,
- rcu_dereference_raw(node->bit[0])->bits, bits,
- node->cidr);
- printk(fmt_connection, ip1, node->cidr, ip2,
- rcu_dereference_raw(node->bit[0])->cidr);
- print_node(rcu_dereference_raw(node->bit[0]), bits);
+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
+ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
if (node->bit[1]) {
- swap_endian_and_apply_cidr(ip2,
- rcu_dereference_raw(node->bit[1])->bits,
- bits, node->cidr);
- printk(fmt_connection, ip1, node->cidr, ip2,
- rcu_dereference_raw(node->bit[1])->cidr);
- print_node(rcu_dereference_raw(node->bit[1]), bits);
+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
+ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
+ if (node->bit[0])
+ print_node(rcu_dereference_raw(node->bit[0]), bits);
+ if (node->bit[1])
+ print_node(rcu_dereference_raw(node->bit[1]), bits);
}
static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
union nf_inet_addr mask;
- memset(&mask, 0x00, 128 / 8);
- memset(&mask, 0xff, cidr / 8);
+ memset(&mask, 0, sizeof(mask));
+ memset(&mask.all, 0xff, cidr / 8);
if (cidr % 32)
mask.all[cidr / 32] = (__force u32)htonl(
(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
}
static __init inline bool
-horrible_match_v4(const struct horrible_allowedips_node *node,
- struct in_addr *ip)
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
return (ip->s_addr & node->mask.ip) == node->ip.ip;
}
static __init inline bool
-horrible_match_v6(const struct horrible_allowedips_node *node,
- struct in6_addr *ip)
+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
{
- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
- node->ip.ip6[0] &&
- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
- node->ip.ip6[1] &&
- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
- node->ip.ip6[2] &&
+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
(ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}
static __init void
-horrible_insert_ordered(struct horrible_allowedips *table,
- struct horrible_allowedips_node *node)
+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
struct horrible_allowedips_node *other = NULL, *where = NULL;
u8 my_cidr = horrible_mask_to_cidr(node->mask);
hlist_for_each_entry(other, &table->head, table) {
- if (!memcmp(&other->mask, &node->mask,
- sizeof(union nf_inet_addr)) &&
- !memcmp(&other->ip, &node->ip,
- sizeof(union nf_inet_addr)) &&
- other->ip_version == node->ip_version) {
+ if (other->ip_version == node->ip_version &&
+ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
+ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
other->value = node->value;
kfree(node);
return;
}
+ }
+ hlist_for_each_entry(other, &table->head, table) {
where = other;
if (horrible_mask_to_cidr(other->mask) <= my_cidr)
break;
@@ -201,8 +181,7 @@ static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
struct in_addr *ip, u8 cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
- GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -219,8 +198,7 @@ static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
struct in6_addr *ip, u8 cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
- GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
}
static __init void *
-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
- struct in_addr *ip)
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
struct horrible_allowedips_node *node;
- void *ret = NULL;
hlist_for_each_entry(node, &table->head, table) {
- if (node->ip_version != 4)
- continue;
- if (horrible_match_v4(node, ip)) {
- ret = node->value;
- break;
- }
+ if (node->ip_version == 4 && horrible_match_v4(node, ip))
+ return node->value;
}
- return ret;
+ return NULL;
}
static __init void *
-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
- struct in6_addr *ip)
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
struct horrible_allowedips_node *node;
- void *ret = NULL;
hlist_for_each_entry(node, &table->head, table) {
- if (node->ip_version != 6)
+ if (node->ip_version == 6 && horrible_match_v6(node, ip))
+ return node->value;
+ }
+ return NULL;
+}
+
+
+static __init void
+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
+{
+ struct horrible_allowedips_node *node;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
+ if (node->value != value)
continue;
- if (horrible_match_v6(node, ip)) {
- ret = node->value;
- break;
- }
+ hlist_del(&node->table);
+ kfree(node);
}
- return ret;
+
}
static __init bool randomized_test(void)
@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
goto free;
}
kref_init(&peers[i]->refcount);
+ INIT_LIST_HEAD(&peers[i]->allowedips_list);
}
mutex_lock(&mutex);
@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
if (wg_allowedips_insert_v4(&t,
(struct in_addr *)mutated,
cidr, peer, &mutex) < 0) {
- pr_err("allowedips random malloc: FAIL\n");
+ pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v4(&h,
@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
print_tree(t.root6, 128);
}
- for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 4);
- if (lookup(t.root4, 32, ip) !=
- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
- pr_err("allowedips random self-test: FAIL\n");
- goto free;
+ for (j = 0;; ++j) {
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 4);
+ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
+ pr_err("allowedips random v4 self-test: FAIL\n");
+ goto free;
+ }
+ prandom_bytes(ip, 16);
+ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+ pr_err("allowedips random v6 self-test: FAIL\n");
+ goto free;
+ }
}
+ if (j >= NUM_PEERS)
+ break;
+ mutex_lock(&mutex);
+ wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
+ mutex_unlock(&mutex);
+ horrible_allowedips_remove_by_value(&h, peers[j]);
}
- for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 16);
- if (lookup(t.root6, 128, ip) !=
- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
- pr_err("allowedips random self-test: FAIL\n");
- goto free;
- }
+ if (t.root4 || t.root6) {
+ pr_err("allowedips random self-test removal: FAIL\n");
+ goto free;
}
+
ret = true;
free:
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index d9ad850daa79..8c496b747108 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
if (new4)
wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
mutex_unlock(&wg->socket_update_lock);
- synchronize_rcu();
+ synchronize_net();
sock_free(old4);
sock_free(old6);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 956157946106..dbc8aef82a65 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -845,6 +845,7 @@ enum htt_security_types {
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
+#define ATH10K_TXRX_NON_QOS_TID 16
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1a08156d5011..7ffb5d5b2a70 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1746,16 +1746,97 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+ }
+ return pn;
+}
+
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 offset)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ return !is_multicast_ether_addr(hdr->addr1);
+}
+
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 peer_id,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_peer *peer;
+ union htt_rx_pn_t *last_pn, new_pn = {0};
+ struct ieee80211_hdr *hdr;
+ bool more_frags;
+ u8 tid, frag_number;
+ u32 seq;
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
+ return false;
+ }
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = ATH10K_TXRX_NON_QOS_TID;
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
+ more_frags = ieee80211_has_morefrags(hdr->frame_control);
+ frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ if (frag_number == 0) {
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else {
+ if (seq != peer->frag_tids_seq[tid])
+ return false;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ return false;
+
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ return true;
+}
+
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
bool fill_crypt_header,
u8 *rx_hdr,
- enum ath10k_pkt_rx_err *err)
+ enum ath10k_pkt_rx_err *err,
+ u16 peer_id,
+ bool frag)
{
struct sk_buff *first;
struct sk_buff *last;
- struct sk_buff *msdu;
+ struct sk_buff *msdu, *temp;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -1768,6 +1849,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool is_decrypted;
bool is_mgmt;
u32 attention;
+ bool frag_pn_check = true, multicast_check = true;
if (skb_queue_empty(amsdu))
return;
@@ -1866,7 +1948,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
skb_queue_walk(amsdu, msdu) {
+ if (frag && !fill_crypt_header && is_decrypted &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
+ msdu,
+ peer_id,
+ 0,
+ enctype);
+
+ if (frag)
+ multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
+ msdu,
+ 0);
+
+ if (!frag_pn_check || !multicast_check) {
+ /* Discard the fragment with invalid PN or multicast DA
+ */
+ temp = msdu->prev;
+ __skb_unlink(msdu, amsdu);
+ dev_kfree_skb_any(msdu);
+ msdu = temp;
+ frag_pn_check = true;
+ multicast_check = true;
+ continue;
+ }
+
ath10k_htt_rx_h_csum_offload(msdu);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_MMIC_STRIPPED;
+
ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
is_decrypted);
@@ -1884,6 +1996,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
}
}
@@ -1991,14 +2108,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = (void *)first->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+ /* First msdu flag is not set for the first msdu of the list */
+ if (!is_first)
+ return false;
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+ /* Validate that the amsdu has a proper first subframe.
+ * A single msdu can be received as an amsdu when the
+ * unauthenticated amsdu flag of a QoS header gets flipped
+ * in non-SPP A-MSDUs; in such cases the first subframe has
+ * an llc/snap header in place of a valid da.
+ * Return false if the da matches the rfc1042 pattern.
+ */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- /* FIXME: It might be a good idea to do some fuzzy-testing to drop
- * invalid/dangerous frames.
- */
-
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
@@ -2009,6 +2174,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
return true;
}
@@ -2071,7 +2241,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
+ false);
msdus_to_queue = skb_queue_len(&amsdu);
ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
@@ -2204,6 +2375,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
+ if (fw_desc->u.bits.discard) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+ goto err;
+ }
+
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem to handle that case either, so we introduce the
* same limitation here as well.
@@ -2509,6 +2685,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
rx_desc_info = __le32_to_cpu(rx_desc->info);
+ hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ /* Discard the fragment with multicast DA */
+ goto err;
+ }
+
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
@@ -2516,8 +2699,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
HTT_RX_NON_TKIP_MIC);
}
- hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
-
if (ieee80211_has_retry(hdr->frame_control))
goto err;
@@ -3027,7 +3208,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
- NULL);
+ NULL, peer_id, frag);
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
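Editor's note: ath10k_htt_rx_h_get_pn() above reconstructs the 48-bit CCMP packet number from the extended IV: in the 8-byte CCMP header, bytes 0 and 1 carry PN0/PN1, byte 2 is reserved, byte 3 holds the ExtIV/Key ID flags, and bytes 4-7 carry PN2-PN5. The fragment check then requires every non-first fragment to continue that sequence by exactly one. A small self-contained sketch of the same logic, with illustrative names and example values only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assemble the 48-bit PN from an 8-byte CCMP header (PN0 PN1 rsvd keyid PN2..PN5). */
static uint64_t ccmp_pn(const uint8_t hdr[8])
{
	return (uint64_t)hdr[0] |
	       (uint64_t)hdr[1] << 8 |
	       (uint64_t)hdr[4] << 16 |
	       (uint64_t)hdr[5] << 24 |
	       (uint64_t)hdr[6] << 32 |
	       (uint64_t)hdr[7] << 40;
}

/* Non-first fragments must continue the PN sequence started by fragment 0. */
static bool frag_pn_ok(uint64_t last_pn, uint64_t new_pn, unsigned int frag_no)
{
	return frag_no == 0 || new_pn == last_pn + 1;
}

int main(void)
{
	const uint8_t frag0[8] = { 0x05, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00 };
	const uint8_t frag1[8] = { 0x06, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00 };
	uint64_t pn0 = ccmp_pn(frag0), pn1 = ccmp_pn(frag1);

	printf("frag1 %s\n", frag_pn_ok(pn0, pn1, 1) ? "accepted" : "dropped");
	return 0;
}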
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index f2b6bf8f0d60..705b6295e466 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
- u8 info0;
+ union {
+ struct {
+ u8 discard:1,
+ forward:1,
+ any_err:1,
+ dup_err:1,
+ reserved:1,
+ inspect:1,
+ extension:2;
+ } bits;
+ u8 info0;
+ } u;
+
u8 version;
u8 len;
u8 flags;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 1d9aa1bb6b6e..603d2f93ac18 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -260,6 +260,16 @@ static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+ return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+ (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+ __le32_to_cpu(attn->info1)));
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -852,6 +862,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
__skb_queue_purge(&rx_tid->rx_frags);
}
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ }
+}
+
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
@@ -3450,6 +3478,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
u8 tid;
int ret = 0;
bool more_frags;
+ bool is_mcbc;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
@@ -3457,6 +3486,11 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+ is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+ /* Multicast/Broadcast fragments are not expected */
+ if (is_mcbc)
+ return -EINVAL;
if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index bf399312b5ff..623da3bf9dc8 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 4df425dd31a2..9d0ff150ec30 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2779,6 +2779,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+ /* flush the fragments cache during key (re)install to
+ * ensure all frags in the new frag list belong to the same key.
+ */
+ if (peer && cmd == SET_KEY)
+ ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7506cea46f58..433a047f3747 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1027,14 +1027,17 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
{
struct ath6kl *ar = file->private_data;
unsigned long lrssi_roam_threshold;
+ int ret;
if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
return -EINVAL;
ar->lrssi_roam_threshold = lrssi_roam_threshold;
- ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ce8c102df7b3..633d0ab19031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1217,13 +1217,9 @@ static struct sdio_driver brcmf_sdmmc_driver = {
},
};
-void brcmf_sdio_register(void)
+int brcmf_sdio_register(void)
{
- int ret;
-
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- if (ret)
- brcmf_err("sdio_register_driver failed: %d\n", ret);
+ return sdio_register_driver(&brcmf_sdmmc_driver);
}
void brcmf_sdio_exit(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 08f9d47f2e5c..3f5da3bb6aa5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -275,11 +275,26 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
-void brcmf_sdio_register(void);
+int brcmf_sdio_register(void);
+#else
+static inline void brcmf_sdio_exit(void) { }
+static inline int brcmf_sdio_register(void) { return 0; }
#endif
+
#ifdef CONFIG_BRCMFMAC_USB
void brcmf_usb_exit(void);
-void brcmf_usb_register(void);
+int brcmf_usb_register(void);
+#else
+static inline void brcmf_usb_exit(void) { }
+static inline int brcmf_usb_register(void) { return 0; }
+#endif
+
+#ifdef CONFIG_BRCMFMAC_PCIE
+void brcmf_pcie_exit(void);
+int brcmf_pcie_register(void);
+#else
+static inline void brcmf_pcie_exit(void) { }
+static inline int brcmf_pcie_register(void) { return 0; }
#endif
#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 838b09b23abf..cee1682d2333 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1518,40 +1518,34 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
}
}
-static void brcmf_driver_register(struct work_struct *work)
-{
-#ifdef CONFIG_BRCMFMAC_SDIO
- brcmf_sdio_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
- brcmf_usb_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
- brcmf_pcie_register();
-#endif
-}
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
-
int __init brcmf_core_init(void)
{
- if (!schedule_work(&brcmf_driver_work))
- return -EBUSY;
+ int err;
+ err = brcmf_sdio_register();
+ if (err)
+ return err;
+
+ err = brcmf_usb_register();
+ if (err)
+ goto error_usb_register;
+
+ err = brcmf_pcie_register();
+ if (err)
+ goto error_pcie_register;
return 0;
+
+error_pcie_register:
+ brcmf_usb_exit();
+error_usb_register:
+ brcmf_sdio_exit();
+ return err;
}
void __exit brcmf_core_exit(void)
{
- cancel_work_sync(&brcmf_driver_work);
-
-#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
brcmf_pcie_exit();
-#endif
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index ad79e3b7e74a..143a705b5cb3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2140,15 +2140,10 @@ static struct pci_driver brcmf_pciedrvr = {
};
-void brcmf_pcie_register(void)
+int brcmf_pcie_register(void)
{
- int err;
-
brcmf_dbg(PCIE, "Enter\n");
- err = pci_register_driver(&brcmf_pciedrvr);
- if (err)
- brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
- err);
+ return pci_register_driver(&brcmf_pciedrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
index d026401d2001..8e6c227e8315 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
@@ -11,9 +11,4 @@ struct brcmf_pciedev {
struct brcmf_pciedev_info *devinfo;
};
-
-void brcmf_pcie_exit(void);
-void brcmf_pcie_register(void);
-
-
#endif /* BRCMFMAC_PCIE_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 586f4dfc638b..9fb68c2dc7e3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1584,12 +1584,8 @@ void brcmf_usb_exit(void)
usb_deregister(&brcmf_usbdrvr);
}
-void brcmf_usb_register(void)
+int brcmf_usb_register(void)
{
- int ret;
-
brcmf_dbg(USB, "Enter\n");
- ret = usb_register(&brcmf_usbdrvr);
- if (ret)
- brcmf_err("usb_register failed %d\n", ret);
+ return usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 51ce767eaf88..7a6fd46d0c6e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1693,8 +1693,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
+
data->started = false;
hrtimer_cancel(&data->beacon_timer);
+
+ while (!skb_queue_empty(&data->pending))
+ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
wiphy_dbg(hw->wiphy, "%s\n", __func__);
}
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index f5b78257d551..c68814841583 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -801,24 +801,6 @@ static const struct attribute_group mesh_ie_group = {
.attrs = mesh_ie_attrs,
};
-static void lbs_persist_config_init(struct net_device *dev)
-{
- int ret;
- ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
- if (ret)
- pr_err("failed to create boot_opts_group.\n");
-
- ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
- if (ret)
- pr_err("failed to create mesh_ie_group.\n");
-}
-
-static void lbs_persist_config_remove(struct net_device *dev)
-{
- sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
- sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
/***************************************************************************
* Initializing and starting, stopping mesh
@@ -1014,6 +996,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
+ mesh_dev->sysfs_groups[1] = &boot_opts_group;
+ mesh_dev->sysfs_groups[2] = &mesh_ie_group;
+
/* Register virtual mesh interface */
ret = register_netdev(mesh_dev);
if (ret) {
@@ -1021,19 +1007,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
goto err_free_netdev;
}
- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- if (ret)
- goto err_unregister;
-
- lbs_persist_config_init(mesh_dev);
-
/* Everything successful */
ret = 0;
goto done;
-err_unregister:
- unregister_netdev(mesh_dev);
-
err_free_netdev:
free_netdev(mesh_dev);
@@ -1054,8 +1031,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
netif_stop_queue(mesh_dev);
netif_carrier_off(mesh_dev);
- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- lbs_persist_config_remove(mesh_dev);
unregister_netdev(mesh_dev);
priv->mesh_dev = NULL;
kfree(mesh_dev->ieee80211_ptr);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 977acab0360a..03fe62837557 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -514,10 +514,36 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
struct sk_buff *skb = phy->rx_amsdu[q].head;
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_dev *dev = phy->dev;
phy->rx_amsdu[q].head = NULL;
phy->rx_amsdu[q].tail = NULL;
+
+ /*
+ * Validate if the amsdu has a proper first subframe.
+ * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
+ * flag of the QoS header gets flipped. In such cases, the first
+ * subframe has a LLC/SNAP header in the location of the destination
+ * address.
+ */
+ if (skb_shinfo(skb)->frag_list) {
+ int offset = 0;
+
+ if (!(status->flag & RX_FLAG_8023)) {
+ offset = ieee80211_get_hdrlen_from_skb(skb);
+
+ if ((status->flag &
+ (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
+ RX_FLAG_DECRYPTED)
+ offset += 8;
+ }
+
+ if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
__skb_queue_tail(&dev->rx_skb[q], skb);
}
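Editor's note: this mt76 hunk and the ath10k change above implement the same A-MSDU sanity check. If an attacker flips the "A-MSDU present" QoS bit on an ordinary MSDU, the bytes where the first subframe's destination address should sit are actually the LLC/SNAP header, which for RFC 1042 encapsulation always begins AA AA 03 00 00 00. A standalone sketch of the comparison, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* RFC 1042 LLC/SNAP header: DSAP/SSAP 0xAA 0xAA, UI control 0x03, OUI 00:00:00. */
static const uint8_t rfc1042_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* The first A-MSDU subframe begins with a 6-byte destination address.  If that
 * field matches the RFC 1042 pattern, the frame is most likely a plain MSDU
 * whose A-MSDU flag was flipped, so the receiver should drop it.
 */
static bool first_subframe_looks_spoofed(const uint8_t *subframe)
{
	return memcmp(subframe, rfc1042_hdr, sizeof(rfc1042_hdr)) == 0;
}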
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 86341d1f82f3..d20f05a7717d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -510,7 +510,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
- set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index f81a17d56008..e2dcfee6be81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1912,8 +1912,9 @@ void mt7615_pm_wake_work(struct work_struct *work)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7615_WATCHDOG_TIME);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7615_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index 17fe4187d1de..d1be78b0711c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -51,16 +51,13 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
-static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
u32 status;
int ret;
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
sdio_claim_host(func);
sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
@@ -76,13 +73,21 @@ static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
}
sdio_release_host(func);
-
-out:
dev->pm.last_activity = jiffies;
return 0;
}
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+
+ if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ return __mt7663s_mcu_drv_pmctrl(dev);
+
+ return 0;
+}
+
static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
@@ -123,7 +128,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
struct mt7615_mcu_ops *mcu_ops;
int ret;
- ret = mt7663s_mcu_drv_pmctrl(dev);
+ ret = __mt7663s_mcu_drv_pmctrl(dev);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index c55698f9c49a..028ff432d811 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -55,10 +55,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
dev->mt76.mcu_ops = &mt7663u_mcu_ops,
- /* usb does not support runtime-pm */
- clear_bit(MT76_STATE_PM, &dev->mphy.state);
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
-
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
mt7615_mcu_restart(&dev->mt76);
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index fe0ab5e5ff81..619561606f96 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -721,6 +721,10 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
+ phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
+ sta->ht_cap.ampdu_factor) |
+ FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
+ sta->ht_cap.ampdu_density);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 5847f943e8da..b795e7245c07 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.reconfig_complete = mt76x02_reconfig_complete,
};
-static int mt76x0e_register_device(struct mt76x02_dev *dev)
+static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
{
int err;
@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
- err = mt76x02_dma_init(dev);
- if (err < 0)
- return err;
+ if (!resume) {
+ err = mt76x02_dma_init(dev);
+ if (err < 0)
+ return err;
+ }
err = mt76x0_init_hardware(dev);
if (err < 0)
@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ return 0;
+}
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+ int err;
+
+ err = mt76x0e_init_hardware(dev, false);
+ if (err < 0)
+ return err;
+
err = mt76x0_register_device(dev);
if (err < 0)
return err;
@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ mt76_pci_disable_aspm(pdev);
+
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
&drv_ops);
if (!mdev)
@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
mt76_free_device(mdev);
}
+#ifdef CONFIG_PM
+static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int i;
+
+ mt76_worker_disable(&mdev->tx_worker);
+ for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
+ for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
+ mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
+ napi_disable(&mdev->tx_napi);
+
+ mt76_for_each_q_rx(mdev, i)
+ napi_disable(&mdev->napi[i]);
+
+ mt76x02_dma_disable(dev);
+ mt76x02_mcu_cleanup(dev);
+ mt76x0_chip_onoff(dev, false, false);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ pci_save_state(pdev);
+
+ return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int mt76x0e_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int err, i;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ mt76_worker_enable(&mdev->tx_worker);
+
+ mt76_for_each_q_rx(mdev, i) {
+ mt76_queue_rx_reset(dev, i);
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ return mt76x0e_init_hardware(dev, true);
+}
+#endif /* CONFIG_PM */
+
static const struct pci_device_id mt76x0e_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
.id_table = mt76x0e_device_table,
.probe = mt76x0e_probe,
.remove = mt76x0e_remove,
+#ifdef CONFIG_PM
+ .suspend = mt76x0e_suspend,
+ .resume = mt76x0e_resume,
+#endif /* CONFIG_PM */
};
module_pci_driver(mt76x0e_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index fe28bf4050c4..1763ea0614ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -76,8 +76,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
struct wiphy *wiphy = hw->wiphy;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = 64;
+ hw->max_tx_aggregation_subframes = 128;
hw->radiotap_timestamp.units_pos =
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 214bd1859792..decf2d5f0ce3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -1404,8 +1404,9 @@ void mt7921_pm_wake_work(struct work_struct *work)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt7921_tx_cleanup(dev);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7921_WATCHDOG_TIME);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7921_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index f4c27aa41048..97a0ef331ac3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -74,8 +74,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
else if (band == NL80211_BAND_5GHZ)
he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
he_cap_elem->phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 5f3d56d570a5..67dc4b4cc094 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -402,20 +402,22 @@ static void
mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
u16 wlan_idx)
{
- struct mt7921_mcu_wlan_info_event *wtbl_info =
- (struct mt7921_mcu_wlan_info_event *)(skb->data);
- struct rate_info rate = {};
- u8 curr_idx = wtbl_info->rate_info.rate_idx;
- u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
- struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
+ struct mt7921_mcu_wlan_info_event *wtbl_info;
struct mt76_phy *mphy = &dev->mphy;
struct mt7921_sta_stats *stats;
+ struct rate_info rate = {};
struct mt7921_sta *msta;
struct mt76_wcid *wcid;
+ u8 idx;
if (wlan_idx >= MT76_N_WCIDS)
return;
+ wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
+ idx = wtbl_info->rate_info.rate_idx;
+ if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
+ return;
+
rcu_read_lock();
wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
@@ -426,7 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
stats = &msta->stats;
/* current rate */
- mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
+ mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
+ le16_to_cpu(wtbl_info->rate_info.rate[idx]));
stats->tx_rate = rate;
out:
rcu_read_unlock();
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 2a7ee90a3f54..ffd150ec181f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
static void rtl_fwevt_wq_callback(struct work_struct *work);
static void rtl_c2hcmd_wq_callback(struct work_struct *work);
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (!wq)
+ return -ENOMEM;
/* <1> timer */
timer_setup(&rtlpriv->works.watchdog_timer,
@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
- if (unlikely(!rtlpriv->works.rtl_wq)) {
- pr_err("Failed to allocate work queue\n");
- return;
- }
+ rtlpriv->works.rtl_wq = wq;
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
rtl_watchdog_wq_callback);
@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_swlps_rfon_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
+ return 0;
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -564,9 +566,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
rtlmac->link_state = MAC80211_NOLINK;
/* <6> init deferred work */
- _rtl_init_deferred_work(hw);
-
- return 0;
+ return _rtl_init_deferred_work(hw);
}
EXPORT_SYMBOL_GPL(rtl_init_core);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 193b723fe3bd..c58996c1e230 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
if (queue->task) {
kthread_stop(queue->task);
+ put_task_struct(queue->task);
queue->task = NULL;
}
@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
if (IS_ERR(task))
goto kthread_err;
queue->task = task;
+ /*
+ * Take a reference to the task in order to prevent it from being freed
+ * if the thread function returns before kthread_stop is called.
+ */
+ get_task_struct(task);
task = kthread_run(xenvif_dealloc_kthread, queue,
"%s-dealloc", queue->name);
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.h b/drivers/nfc/nfcmrvl/fw_dnld.h
index ee4a339c05fd..058ce77b3cbc 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.h
+++ b/drivers/nfc/nfcmrvl/fw_dnld.h
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell NFC driver: Firmware downloader
*
* Copyright (C) 2015, Marvell International Ltd.
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 18cd96284b77..c5420616b7bc 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell NFC-over-I2C driver: I2C interface related functions
*
* Copyright (C) 2015, Marvell International Ltd.
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index de68ff45e49a..e84ee18c73ae 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell NFC driver
*
* Copyright (C) 2014-2015, Marvell International Ltd.
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 8e0ddb434770..dec0d3eb3648 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell NFC-over-SPI driver: SPI interface related functions
*
* Copyright (C) 2015, Marvell International Ltd.
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index e5a622ce4b95..7194dd7ef0f1 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell NFC-over-UART driver
*
* Copyright (C) 2015, Marvell International Ltd.
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 888e298f610b..bcd563cb556c 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell NFC-over-USB driver: USB interface related functions
*
* Copyright (C) 2014, Marvell International Ltd.
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index a44d49d63968..494675aeaaad 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -71,7 +71,8 @@ config NVME_FC
config NVME_TCP
tristate "NVM Express over Fabrics TCP host driver"
depends on INET
- depends on BLK_DEV_NVME
+ depends on BLOCK
+ select NVME_CORE
select NVME_FABRICS
select CRYPTO
select CRYPTO_CRC32C
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 522c9b229f80..66973bb56305 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2901,7 +2901,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
}
- ret = nvme_mpath_init(ctrl, id);
+ ret = nvme_mpath_init_identify(ctrl, id);
if (ret < 0)
goto out_free;
@@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
cdev_init(cdev, fops);
cdev->owner = owner;
ret = cdev_device_add(cdev, cdev_device);
- if (ret)
+ if (ret) {
+ put_device(cdev_device);
ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
+ }
return ret;
}
@@ -4364,6 +4366,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+ nvme_mpath_init_ctrl(ctrl);
return 0;
out_free_name:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a2bb7fc63a73..34a84d2086c7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
cmd->connect.recfmt);
break;
+ case NVME_SC_HOST_PATH_ERROR:
+ dev_err(ctrl->device,
+ "Connect command failed: host path error\n");
+ break;
+
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d9ab9e7871d0..f183f9fa03d0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2461,6 +2461,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
+ int q;
+
+ /*
+ * if aborting io, the queues are no longer good, mark them
+ * all as not live.
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ for (q = 1; q < ctrl->ctrl.queue_count; q++)
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+ }
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
/*
* If io queues are present, stop them and terminate all outstanding
* ios on them. As FC allocates FC exchange for each io, the
@@ -3095,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue;
}
@@ -3102,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue;
}
@@ -3268,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
return;
- if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status);
- else if (time_after_eq(jiffies, rport->dev_loss_end))
+ if (status > 0 && (status & NVME_SC_DNR))
+ recon = false;
+ } else if (time_after_eq(jiffies, rport->dev_loss_end))
recon = false;
if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
@@ -3286,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
- if (portptr->port_state == FC_OBJSTATE_ONLINE)
- dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: Max reconnect attempts (%d) "
- "reached.\n",
- ctrl->cnum, ctrl->ctrl.nr_reconnects);
- else
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+ if (status > 0 && (status & NVME_SC_DNR))
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: reconnect failure\n",
+ ctrl->cnum);
+ else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Max reconnect attempts "
+ "(%d) reached.\n",
+ ctrl->cnum, ctrl->ctrl.nr_reconnects);
+ } else
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: dev_loss_tmo (%d) expired "
"while waiting for remoteport connectivity.\n",
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0551796517e6..f81871c7128a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -781,9 +781,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
put_disk(head->disk);
}
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
- int error;
+ mutex_init(&ctrl->ana_lock);
+ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+}
+
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
+ size_t ana_log_size;
+ int error = 0;
/* check if multipath is enabled and we have the capability */
if (!multipath || !ctrl->subsys ||
@@ -795,37 +804,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
- mutex_init(&ctrl->ana_lock);
- timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
- ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
- ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
- ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
-
- if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+ ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
+ ctrl->max_namespaces * sizeof(__le32);
+ if (ana_log_size > max_transfer_size) {
dev_err(ctrl->device,
- "ANA log page size (%zd) larger than MDTS (%d).\n",
- ctrl->ana_log_size,
- ctrl->max_hw_sectors << SECTOR_SHIFT);
+ "ANA log page size (%zd) larger than MDTS (%zd).\n",
+ ana_log_size, max_transfer_size);
dev_err(ctrl->device, "disabling ANA support.\n");
- return 0;
+ goto out_uninit;
}
-
- INIT_WORK(&ctrl->ana_work, nvme_ana_work);
- kfree(ctrl->ana_log_buf);
- ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
- if (!ctrl->ana_log_buf) {
- error = -ENOMEM;
- goto out;
+ if (ana_log_size > ctrl->ana_log_size) {
+ nvme_mpath_stop(ctrl);
+ kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf)
+ return -ENOMEM;
}
-
+ ctrl->ana_log_size = ana_log_size;
error = nvme_read_ana_log(ctrl);
if (error)
- goto out_free_ana_log_buf;
+ goto out_uninit;
return 0;
-out_free_ana_log_buf:
- kfree(ctrl->ana_log_buf);
- ctrl->ana_log_buf = NULL;
-out:
+
+out_uninit:
+ nvme_mpath_uninit(ctrl);
return error;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 05f31a2c64bb..0015860ec12b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -712,7 +712,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -780,7 +781,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline void nvme_trace_bio_complete(struct request *req)
{
}
-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37943dc4c2c1..4697a94c0945 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
- struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
struct ib_sge *sge = &req->sge[1];
+ struct scatterlist *sgl;
u32 len = 0;
int i;
- for (i = 0; i < count; i++, sgl++, sge++) {
+ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
sge->addr = sg_dma_address(sgl);
sge->length = sg_dma_len(sgl);
sge->lkey = queue->device->pd->local_dma_lkey;
len += sge->length;
+ sge++;
}
sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 0222e23f5936..34f4b3402f7c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -943,7 +943,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
if (ret <= 0)
return ret;
- nvme_tcp_advance_req(req, ret);
if (queue->data_digest)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
@@ -960,6 +959,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
}
return 1;
}
+ nvme_tcp_advance_req(req, ret);
}
return -EAGAIN;
}
@@ -1140,7 +1140,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
pending = true;
else if (unlikely(result < 0))
break;
- }
+ } else
+ pending = !llist_empty(&queue->req_list);
result = nvme_tcp_try_recv(queue);
if (result > 0)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e7a367cf6d36..dcd49a72f2f3 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -975,10 +975,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
return 0;
+ default:
+ return nvmet_report_invalid_opcode(req);
}
-
- pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 25cc2ee8de3f..b20b8d0a1144 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
{
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work);
- bool cmd_seen = ctrl->cmd_seen;
+ bool reset_tbkas = ctrl->reset_tbkas;
- ctrl->cmd_seen = false;
- if (cmd_seen) {
+ ctrl->reset_tbkas = false;
+ if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref);
if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+ * send us a keep-alive during this period; hence reset the
+ * traffic-based keep-alive timer so we don't trigger a
+ * controller teardown as a result of a keep-alive expiration.
+ */
+ ctrl->reset_tbkas = true;
nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
@@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
}
if (sq->ctrl)
- sq->ctrl->cmd_seen = true;
+ sq->ctrl->reset_tbkas = true;
return true;
@@ -998,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
return req->transfer_len - req->metadata_len;
}
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+ struct nvmet_req *req)
{
- req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
nvmet_data_transfer_len(req));
if (!req->sg)
goto out_err;
if (req->metadata_len) {
- req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
&req->metadata_sg_cnt, req->metadata_len);
if (!req->metadata_sg)
goto out_free_sg;
}
+
+ req->p2p_dev = p2p_dev;
+
return 0;
out_free_sg:
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1018,25 +1029,19 @@ out_err:
return -ENOMEM;
}
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
- if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
- return false;
-
- if (req->sq->ctrl && req->sq->qid && req->ns) {
- req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
- req->ns->nsid);
- if (req->p2p_dev)
- return true;
- }
-
- req->p2p_dev = NULL;
- return false;
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+ !req->sq->ctrl || !req->sq->qid || !req->ns)
+ return NULL;
+ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}
int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
- if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
return 0;
req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1065,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
if (req->metadata_sg)
pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ req->p2p_dev = NULL;
} else {
sgl_free(req->sg);
if (req->metadata_sg)
@@ -1372,7 +1378,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_free_changed_ns_list;
if (subsys->cntlid_min > subsys->cntlid_max)
- goto out_free_changed_ns_list;
+ goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 4845d12e374a..fc3645fc2c24 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -379,7 +379,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
req->execute = nvmet_execute_disc_identify;
return 0;
default:
- pr_err("unhandled cmd %d\n", cmd->common.opcode);
+ pr_debug("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 1420a8e3e0b1..7d0f3523fdab 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -94,7 +94,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
req->execute = nvmet_execute_prop_get;
break;
default:
- pr_err("received unknown capsule type 0x%x\n",
+ pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -284,13 +284,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
if (!nvme_is_fabrics(cmd)) {
- pr_err("invalid command 0x%x on unconnected queue.\n",
+ pr_debug("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+ pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 9a8b3726a37c..429263ca9b97 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 715d4376c997..7fdbdc496597 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
- pr_err("failed to open file %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->file));
- return PTR_ERR(ns->file);
+ ret = PTR_ERR(ns->file);
+ pr_err("failed to open file %s: (%d)\n",
+ ns->device_path, ret);
+ ns->file = NULL;
+ return ret;
}
ret = nvmet_file_ns_revalidate(ns);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 74b3b150e1a5..a5c4a1865026 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
- clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+ return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}
+ ctrl->ctrl.queue_count = 1;
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
return 0;
out_cleanup_queue:
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure should never happen */
- WARN_ON_ONCE(1);
+ if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ /* state change failure for non-deleted ctrl? */
+ WARN_ON_ONCE(1);
return;
}
@@ -590,8 +595,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
- if (ret)
+ if (ret) {
+ kfree(ctrl);
goto out;
+ }
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
WARN_ON_ONCE(1);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 5566ed403576..53aea9a8056e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -167,7 +167,7 @@ struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
- bool cmd_seen;
+ bool reset_tbkas;
struct mutex lock;
u64 cap;
@@ -616,4 +616,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+ return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+ req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 2798944899b7..39b1473f7204 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (req->sg_cnt > BIO_MAX_VECS)
return -EINVAL;
- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 6c1f3ab7649c..7d607f435e36 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
nvmet_rdma_release_rsp(rsp);
@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u16 status;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index f9f34f6caf5e..d8aceef83284 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* nvmet_req_init is completed.
*/
if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
- len && len < cmd->req.port->inline_data_size &&
+ len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
}
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index eca805c1a023..9e6ce0dc2f53 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
@@ -38,6 +39,6 @@ ifdef CONFIG_ACPI
ifdef CONFIG_PCI_QUIRKS
obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
-obj-$(CONFIG_ARM64) += pcie-tegra194.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
endif
endif
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 000000000000..c2de6ed4d86f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index bafd2c6ab3c2..504669e3afe0 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -22,8 +22,6 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/pci-ecam.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -247,24 +245,6 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-static const u32 event_cntr_ctrl_offset[] = {
- 0x1d8,
- 0x1a8,
- 0x1a8,
- 0x1a8,
- 0x1c4,
- 0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
- 0x1dc,
- 0x1ac,
- 0x1ac,
- 0x1ac,
- 0x1c8,
- 0x1dc
-};
-
struct tegra_pcie_dw {
struct device *dev;
struct resource *appl_res;
@@ -313,104 +293,6 @@ struct tegra_pcie_dw_of_data {
enum dw_pcie_device_mode mode;
};
-#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-struct tegra194_pcie_ecam {
- void __iomem *config_base;
- void __iomem *iatu_base;
- void __iomem *dbi_base;
-};
-
-static int tegra194_acpi_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct tegra194_pcie_ecam *pcie_ecam;
-
- pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
- if (!pcie_ecam)
- return -ENOMEM;
-
- pcie_ecam->config_base = cfg->win;
- pcie_ecam->iatu_base = cfg->win + SZ_256K;
- pcie_ecam->dbi_base = cfg->win + SZ_512K;
- cfg->priv = pcie_ecam;
-
- return 0;
-}
-
-static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
- u32 val, u32 reg)
-{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- writel(val, pcie_ecam->iatu_base + offset + reg);
-}
-
-static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
- PCIE_ATU_LOWER_BASE);
- atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
- PCIE_ATU_UPPER_BASE);
- atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
- PCIE_ATU_LOWER_TARGET);
- atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
- PCIE_ATU_LIMIT);
- atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
- PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
-
-static void __iomem *tegra194_map_bus(struct pci_bus *bus,
- unsigned int devfn, int where)
-{
- struct pci_config_window *cfg = bus->sysdata;
- struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
- u32 busdev;
- int type;
-
- if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
- return NULL;
-
- if (bus->number == cfg->busr.start) {
- if (PCI_SLOT(devfn) == 0)
- return pcie_ecam->dbi_base + where;
- else
- return NULL;
- }
-
- busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- PCIE_ATU_FUNC(PCI_FUNC(devfn));
-
- if (bus->parent->number == cfg->busr.start) {
- if (PCI_SLOT(devfn) == 0)
- type = PCIE_ATU_TYPE_CFG0;
- else
- return NULL;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- }
-
- program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
- SZ_256K);
-
- return pcie_ecam->config_base + where;
-}
-
-const struct pci_ecam_ops tegra194_pcie_ops = {
- .init = tegra194_acpi_init,
- .pci_ops = {
- .map_bus = tegra194_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
-
-#ifdef CONFIG_PCIE_TEGRA194
-
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
return container_of(pci, struct tegra_pcie_dw, pci);
@@ -694,6 +576,24 @@ static struct pci_ops tegra_pci_ops = {
};
#if defined(CONFIG_PCIEASPM)
+static const u32 event_cntr_ctrl_offset[] = {
+ 0x1d8,
+ 0x1a8,
+ 0x1a8,
+ 0x1a8,
+ 0x1c4,
+ 0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+ 0x1dc,
+ 0x1ac,
+ 0x1ac,
+ 0x1ac,
+ 0x1c8,
+ 0x1dc
+};
+
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
u32 val;
@@ -2411,5 +2311,3 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");
-
-#endif /* CONFIG_PCIE_TEGRA194 */
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 051b48bd7985..e3f5e7ab7606 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
udelay(PIO_RETRY_DELAY);
}
- dev_err(dev, "config read/write timed out\n");
+ dev_err(dev, "PIO read/write transfer time out\n");
return -ETIMEDOUT;
}
@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
return true;
}
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+
+ /*
+ * Trying to start a new PIO transfer when the previous one has not
+ * completed causes an External Abort on the CPU, which results in a
+ * kernel panic:
+ *
+ * SError Interrupt on CPU0, code 0xbf000002 -- SError
+ * Kernel panic - not syncing: Asynchronous SError Interrupt
+ *
+ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
+ * concurrent calls. But because a PIO transfer may take about 1.5s
+ * when the link is down or a card is disconnected, advk_pcie_wait_pio()
+ * may time out before the transfer completes.
+ *
+ * Some versions of ARM Trusted Firmware handle this External Abort at
+ * EL3 level and mask it to prevent a kernel panic. Relevant TF-A commit:
+ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+ */
+ if (advk_readl(pcie, PIO_START)) {
+ dev_err(dev, "Previous PIO read/write transfer is still running\n");
+ return true;
+ }
+
+ return false;
+}
+
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie)) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (where % size)
return PCIBIOS_SET_FAILED;
- /* Start PIO */
- advk_writel(pcie, 0, PIO_START);
- advk_writel(pcie, 1, PIO_ISR);
+ if (advk_pcie_pio_is_running(pcie))
+ return PCIBIOS_SET_FAILED;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Start the transfer */
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
advk_writel(pcie, 1, PIO_START);
ret = advk_pcie_wait_pio(pcie);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index da5b414d585a..a143b02b2dcd 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
#endif
}
+bool pci_host_of_has_msi_map(struct device *dev)
+{
+ if (dev && dev->of_node)
+ return of_get_property(dev->of_node, "msi-map", NULL);
+ return false;
+}
+
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
{
@@ -346,6 +353,8 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
dev_node);
*io_base = range.cpu_addr;
+ } else if (resource_type(res) == IORESOURCE_MEM) {
+ res->flags &= ~IORESOURCE_MEM_64;
}
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b717680377a9..8d4ebe095d0c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1900,11 +1900,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
int err;
int i, bars = 0;
- if (atomic_inc_return(&dev->enable_cnt) > 1) {
- pci_update_current_state(dev, dev->current_state);
- return 0; /* already enabled */
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. Get the current power state so
+ * that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
+ return 0; /* already enabled */
+
bridge = pci_upstream_bridge(dev);
if (bridge)
pci_enable_bridge(bridge);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3a62d09b8869..275204646c68 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
pci_set_bus_msi_domain(bus);
- if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+ if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+ !pci_host_of_has_msi_map(parent))
bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
if (!parent)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index dcb229de1acb..22b2bb1109c9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3547,6 +3547,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
+ * Some NVIDIA GPU devices do not work with bus reset; SBR needs to be
+ * prevented for those affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+ if ((dev->device & 0xffc0) == 0x2340)
+ quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ quirk_nvidia_no_bus_reset);
+
+/*
* Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
* The device will throw a Link Down error on AER-capable systems and
* regardless of AER, config space of the device is never accessible again
@@ -3566,6 +3578,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working. Prevent bus reset for these devices. With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs. Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -3901,6 +3923,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
return 0;
}
+#define PCI_DEVICE_ID_HINIC_VF 0x375E
+#define HINIC_VF_FLR_TYPE 0x1000
+#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
+#define HINIC_VF_OP 0xE80
+#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+ unsigned long timeout;
+ void __iomem *bar;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ bar = pci_iomap(pdev, 0, 0);
+ if (!bar)
+ return -ENOTTY;
+
+ /* Get and check firmware capabilities */
+ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+ pci_iounmap(pdev, bar);
+ return -ENOTTY;
+ }
+
+ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+ val = ioread32be(bar + HINIC_VF_OP);
+ val = val | HINIC_VF_FLR_PROC_BIT;
+ iowrite32be(val, bar + HINIC_VF_OP);
+
+ pcie_flr(pdev);
+
+ /*
+ * The device must recapture its Bus and Device Numbers after FLR
+ * in order to generate Completions. Issue a config write to let the
+ * device capture this information.
+ */
+ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+ do {
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+ msleep(20);
+ } while (time_before(jiffies, timeout));
+
+ val = ioread32be(bar + HINIC_VF_OP);
+ if (!(val & HINIC_VF_FLR_PROC_BIT))
+ goto reset_complete;
+
+ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+ pci_iounmap(pdev, bar);
+
+ return 0;
+}
+
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@@ -3913,6 +3998,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
+ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+ reset_hinic_vf_dev },
{ 0 }
};
@@ -4753,6 +4840,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+ /* Broadcom multi-function device */
+ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -5154,7 +5243,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
- (pdev->device == 0x7340 && pdev->revision != 0xc5))
+ (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
+ (pdev->device == 0x7341 && pdev->revision != 0x00))
return;
if (pdev->device == 0x15d8) {
@@ -5181,6 +5271,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index 899b9eb43fad..a39f30fa2e99 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
static inline void brcm_usb_writel(u32 val, void __iomem *addr)
{
/* See brcmnand_readl() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 5c68e31c5939..e93818e3991f 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -940,6 +940,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->nsubnodes = node;
if (sp->num_lanes > SIERRA_MAX_LANES) {
+ ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
goto put_child2;
}
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index cdbcc49f7115..731c483a04de 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
break;
default:
dev_err(tphy->dev, "incompatible PHY type\n");
+ clk_disable_unprepare(instance->ref_clk);
+ clk_disable_unprepare(instance->da_ref_clk);
return -EINVAL;
}
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
index c8a7d0927ced..4076580fc2cd 100644
--- a/drivers/phy/microchip/sparx5_serdes.c
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -2470,6 +2470,10 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
priv->coreclock = clock;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ dev_err(priv->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
if (IS_ERR(iomem)) {
dev_err(priv->dev, "Unable to get serdes registers: %s\n",
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 753cb5bab930..2a9465f4bb3a 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -341,7 +341,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
.probe = mt7621_pci_phy_probe,
.driver = {
.name = "mt7621-pci-phy",
- .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+ .of_match_table = mt7621_pci_phy_ids,
},
};
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 9eb6d37c907e..126f5b8735cc 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1212,6 +1212,7 @@ static int wiz_probe(struct platform_device *pdev)
if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+ ret = -EINVAL;
dev_err(dev, "Invalid typec-dir-debounce property\n");
goto err_addr_to_resource;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 996ebcba4d38..4c0d26606b6c 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2702,8 +2702,8 @@ static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
}
/**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g5_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
*
* @ctx: The pinmux context
* @expr: The expression associated with the function whose signal is to be
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 5c1a109842a7..eeab093a7815 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -2611,8 +2611,8 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
};
/**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g6_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
*
* @ctx: The pinmux context
* @expr: The expression associated with the function whose signal is to be
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 9c65d560d48f..9bbfe5c14b36 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -108,7 +108,8 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
}
/**
- * Disable a signal on a pin by disabling all provided signal expressions.
+ * aspeed_disable_sig() - Disable a signal on a pin by disabling all provided
+ * signal expressions.
*
* @ctx: The pinmux context
* @exprs: The list of signal expressions (from a priority level on a pin)
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 57305ca838a7..894e2efd3be7 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -21,7 +21,8 @@ static inline void aspeed_sig_desc_print_val(
}
/**
- * Query the enabled or disabled state of a signal descriptor
+ * aspeed_sig_desc_eval() - Query the enabled or disabled state of a signal
+ * descriptor.
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index c12fa57ebd12..165cb7a59715 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -845,8 +845,10 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
i = 0;
device_for_each_child_node(dev, fwnode) {
ret = microchip_sgpio_register_bank(dev, priv, fwnode, i++);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
return ret;
+ }
}
if (priv->in.gpio.ngpio != priv->out.gpio.ngpio) {
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 25d2f7f7f3b6..11e967dbb44b 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -223,7 +223,7 @@ config PINCTRL_SC7280
config PINCTRL_SC8180X
tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
+ depends on PINCTRL_MSM
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm Technologies Inc TLMM block found on the Qualcomm
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 5aaf57b40407..0bb4931cec59 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
- "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
- "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
- "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
- "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
- "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
- "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
- "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
- "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
- "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107",
};
static const char * const qdss_stm_groups[] = {
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 1f4bca854add..a9b511c7e850 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
if (p->groups[group].enabled) {
dev_err(p->dev, "%s is already enabled\n",
p->groups[group].name);
- return -EBUSY;
+ return 0;
}
p->groups[group].enabled = 1;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index ad9eb5ed8e81..c14d12d54cc5 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1224,7 +1224,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
struct device *dev = pctl->dev;
struct resource res;
int npins = STM32_GPIO_PINS_PER_BANK;
- int bank_nr, err;
+ int bank_nr, err, i = 0;
if (!IS_ERR(bank->rstc))
reset_control_deassert(bank->rstc);
@@ -1246,9 +1246,14 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
of_property_read_string(np, "st,bank-name", &bank->gpio_chip.label);
- if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args)) {
+ if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i, &args)) {
bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.base = args.args[1];
+
+ npins = args.args[2];
+ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ ++i, &args))
+ npins += args.args[2];
} else {
bank_nr = pctl->nbanks;
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index bbc4e71a16ff..38800e86ed8a 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
return NULL;
+ /* Make sure 'avail->idx' is visible already. */
+ virtio_rmb(false);
+
idx = vring->next_avail % vr->num;
head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
if (WARN_ON(head >= vr->num))
@@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
* done or not. Add a memory barrier here to make sure the update above
* completes before updating the idx.
*/
- mb();
+ virtio_mb(false);
vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
@@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
desc = NULL;
fifo->vring[is_rx] = NULL;
+ /*
+ * Make sure the loads/stores are in order before
+ * returning to virtio.
+ */
+ virtio_mb(false);
+
/* Notify upper layer that packet is done. */
spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index a9db2f32658f..b013445147dd 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, priv->irq,
mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
- | IRQF_SHARED | IRQF_NO_AUTOEN,
- "mlxreg-hotplug", priv);
+ | IRQF_SHARED, "mlxreg-hotplug", priv);
if (err) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
return err;
}
+ disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
dev_set_drvdata(&pdev->dev, priv);
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 69e86cd599d3..a06964aa96e7 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -1907,7 +1907,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
{
int status;
- status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
if (status < 0 && status != -EINVAL) {
ssam_err(ctrl,
@@ -2483,8 +2483,7 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
* interrupt, and let the SAM resume callback during the controller
* resume process clear it.
*/
- const int irqf = IRQF_SHARED | IRQF_ONESHOT |
- IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
+ const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
if (IS_ERR(gpiod))
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 685d37a7add1..ef83461fa536 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -156,7 +156,7 @@ static const struct software_node *ssam_node_group_sl2[] = {
NULL,
};
-/* Devices for Surface Laptop 3. */
+/* Devices for Surface Laptop 3 and 4. */
static const struct software_node *ssam_node_group_sl3[] = {
&ssam_node_root,
&ssam_node_bat_ac,
@@ -521,9 +521,12 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop 3 (13", Intel) */
{ "MSHW0114", (unsigned long)ssam_node_group_sl3 },
- /* Surface Laptop 3 (15", AMD) */
+ /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
{ "MSHW0110", (unsigned long)ssam_node_group_sl3 },
+ /* Surface Laptop 4 (13", Intel) */
+ { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 63ce587e79e3..1203b9a82993 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -427,6 +427,7 @@ static int surface_dtx_open(struct inode *inode, struct file *file)
*/
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
up_write(&ddev->client_lock);
+ mutex_destroy(&client->read_lock);
sdtx_device_put(client->ddev);
kfree(client);
return -ENODEV;
@@ -527,20 +528,14 @@ static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt
struct sdtx_client *client = file->private_data;
__poll_t events = 0;
- if (down_read_killable(&client->ddev->lock))
- return -ERESTARTSYS;
-
- if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
- up_read(&client->ddev->lock);
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
return EPOLLHUP | EPOLLERR;
- }
poll_wait(file, &client->ddev->waitq, pt);
if (!kfifo_is_empty(&client->buffer))
events |= EPOLLIN | EPOLLRDNORM;
- up_read(&client->ddev->lock);
return events;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2714f7c3843e..60592fb88e7a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -711,7 +711,7 @@ config INTEL_HID_EVENT
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
- depends on GPIOLIB && ACPI
+ depends on GPIOLIB && ACPI && PM_SLEEP
select GPIOLIB_IRQCHIP
help
Some peripherals on Bay Trail and Cherry Trail platforms signal a
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index a1753485159c..33f823772733 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -270,7 +270,8 @@ int init_dell_smbios_wmi(void)
void exit_dell_smbios_wmi(void)
{
- wmi_driver_unregister(&dell_smbios_wmi_driver);
+ if (wmi_supported)
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
}
MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 13d57434e60f..5529d7b0abea 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -133,31 +133,21 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
return r;
}
+#define DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME(name) \
+ { .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }}
+
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
- }},
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
{ }
};
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 12c31fd5d5ae..0753ef18e721 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -17,12 +17,14 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
static struct input_dev *hpwl_input_dev;
static const struct acpi_device_id hpwl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
+ {"AMDI0051", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 799cbe2ffcf3..8c0867bda828 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -88,6 +88,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
struct acpi_device *dev = lis3->bus_priv;
+ if (!lis3->init_required)
+ return 0;
+
if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
NULL, NULL) != AE_OK)
return -EINVAL;
@@ -356,6 +359,7 @@ static int lis3lv02d_add(struct acpi_device *device)
}
/* call the core layer do its init */
+ lis3_dev.init_required = true;
ret = lis3lv02d_init_device(&lis3_dev);
if (ret)
return ret;
@@ -403,11 +407,27 @@ static int lis3lv02d_suspend(struct device *dev)
static int lis3lv02d_resume(struct device *dev)
{
+ lis3_dev.init_required = false;
+ lis3lv02d_poweron(&lis3_dev);
+ return 0;
+}
+
+static int lis3lv02d_restore(struct device *dev)
+{
+ lis3_dev.init_required = true;
lis3lv02d_poweron(&lis3_dev);
return 0;
}
-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+static const struct dev_pm_ops hp_accel_pm = {
+ .suspend = lis3lv02d_suspend,
+ .resume = lis3lv02d_resume,
+ .freeze = lis3lv02d_suspend,
+ .thaw = lis3lv02d_resume,
+ .poweroff = lis3lv02d_suspend,
+ .restore = lis3lv02d_restore,
+};
+
#define HP_ACCEL_PM (&hp_accel_pm)
#else
#define HP_ACCEL_PM NULL
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6cb5ad4be231..387817290921 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -57,8 +57,8 @@ enum {
};
enum {
- SMBC_CONSERVATION_ON = 3,
- SMBC_CONSERVATION_OFF = 5,
+ SBMC_CONSERVATION_ON = 3,
+ SBMC_CONSERVATION_OFF = 5,
};
enum {
@@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
return eval_int(handle, "GBMD", res);
}
-static int exec_smbc(acpi_handle handle, unsigned long arg)
+static int exec_sbmc(acpi_handle handle, unsigned long arg)
{
- return exec_simple_method(handle, "SMBC", arg);
+ return exec_simple_method(handle, "SBMC", arg);
}
static int eval_hals(acpi_handle handle, unsigned long *res)
@@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
if (err)
return err;
- err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
+ err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
if (err)
return err;
@@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
{
struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
struct ideapad_private *priv = dytc->priv;
+ unsigned long output;
int err;
err = mutex_lock_interruptible(&dytc->mutex);
@@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
/* Determine if we are in CQL mode. This alters the commands we do */
err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
- NULL);
+ &output);
if (err)
goto unlock;
}
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 289c6655d425..569342aa8926 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -51,6 +51,12 @@
#define GPE0A_STS_PORT 0x420
#define GPE0A_EN_PORT 0x428
+struct int0002_data {
+ struct gpio_chip chip;
+ int parent_irq;
+ int wake_enable_count;
+};
+
/*
* As this is not a real GPIO at all, but just a hack to model an event in
* ACPI the get / set functions are dummy functions.
@@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct platform_device *pdev = to_platform_device(chip->parent);
- int irq = platform_get_irq(pdev, 0);
+ struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
- /* Propagate to parent irq */
+ /*
+ * Applying the wakeup flag to our parent IRQ is delayed until system
+ * suspend, because we only want to do this when using s2idle.
+ */
if (on)
- enable_irq_wake(irq);
+ int0002->wake_enable_count++;
else
- disable_irq_wake(irq);
+ int0002->wake_enable_count--;
return 0;
}
@@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_byt_irqchip = {
+static struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
@@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
.irq_set_wake = int0002_irq_set_wake,
};
-static struct irq_chip int0002_cht_irqchip = {
- .name = DRV_NAME,
- .irq_ack = int0002_irq_ack,
- .irq_mask = int0002_irq_mask,
- .irq_unmask = int0002_irq_unmask,
- /*
- * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
- * and we don't want to mess with the ACPI SCI irq settings.
- */
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static const struct x86_cpu_id int0002_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
@@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct x86_cpu_id *cpu_id;
- struct gpio_chip *chip;
+ struct int0002_data *int0002;
struct gpio_irq_chip *girq;
+ struct gpio_chip *chip;
int irq, ret;
/* Menlow has a different INT0002 device? <sigh> */
@@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
+ int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
+ if (!int0002)
return -ENOMEM;
+ int0002->parent_irq = irq;
+
+ chip = &int0002->chip;
chip->label = DRV_NAME;
chip->parent = dev;
chip->owner = THIS_MODULE;
@@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = (struct irq_chip *)cpu_id->driver_data;
+ girq->chip = &int0002_irqchip;
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
+ dev_set_drvdata(dev, int0002);
return 0;
}
@@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
return 0;
}
+static int int0002_suspend(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ /*
+ * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ. Don't
+ * muck with it when firmware-based suspend is used, otherwise we may
+ * cause spurious wakeups from firmware-managed suspend.
+ */
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ enable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static int int0002_resume(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ disable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops int0002_pm_ops = {
+ .suspend = int0002_suspend,
+ .resume = int0002_resume,
+};
+
static const struct acpi_device_id int0002_acpi_ids[] = {
{ "INT0002", 0 },
{ },
@@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
.driver = {
.name = DRV_NAME,
.acpi_match_table = int0002_acpi_ids,
+ .pm = &int0002_pm_ops,
},
.probe = int0002_probe,
.remove = int0002_remove,
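The probe/suspend changes above follow a common pattern: irq_set_wake requests are only counted, and the shared parent IRQ is armed at suspend time, and only for s2idle (i.e. when pm_suspend_via_firmware() is false). A minimal sketch of that deferral, with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/suspend.h>

struct foo_data {
	int parent_irq;
	int wake_enable_count;
};

/* .irq_set_wake: bookkeeping only, the shared parent IRQ is untouched */
static int foo_irq_set_wake(struct foo_data *foo, unsigned int on)
{
	if (on)
		foo->wake_enable_count++;
	else
		foo->wake_enable_count--;
	return 0;
}

/* dev_pm_ops .suspend: arm the parent IRQ only for s2idle */
static int foo_suspend(struct foo_data *foo)
{
	if (!pm_suspend_via_firmware() && foo->wake_enable_count)
		enable_irq_wake(foo->parent_irq);
	return 0;
}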
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 05cced59e251..f58b8543f6ac 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -312,6 +312,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = {
{ "INT34D4", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
static struct platform_driver intel_punit_ipc_driver = {
.probe = intel_punit_ipc_probe,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd60c9397d35..edd71e744d27 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8853,6 +8853,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
+ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 90fe4f8f3c2c..bde740d6120e 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -115,6 +115,32 @@ static const struct ts_dmi_data chuwi_hi10_plus_data = {
.properties = chuwi_hi10_plus_props,
};
+static const struct property_entry chuwi_hi10_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1912),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1272),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_pro_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10-pro.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 42504,
+ .sha256 = { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
+ 0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
+ 0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
+ 0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi10_pro_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -916,6 +942,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Hi10 Prus (CWI597) */
+ .driver_data = (void *)&chuwi_hi10_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
@@ -1097,6 +1132,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
+ },
+ },
+ {
/* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 03a246e60fd9..21c4c34c52d8 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
spin_unlock_irqrestore(&queue->lock, flags);
}
-s32 scaled_ppm_to_ppb(long ppm)
+long scaled_ppm_to_ppb(long ppm)
{
/*
* The 'freq' field in the 'struct timex' is in parts per
@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
s64 ppb = 1 + ppm;
ppb *= 125;
ppb >>= 13;
- return (s32) ppb;
+ return (long) ppb;
}
EXPORT_SYMBOL(scaled_ppm_to_ppb);
@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
} else if (tx->modes & ADJ_FREQUENCY) {
- s32 ppb = scaled_ppm_to_ppb(tx->freq);
+ long ppb = scaled_ppm_to_ppb(tx->freq);
if (ppb > ops->max_adj || ppb < -ops->max_adj)
return -ERANGE;
if (ops->adjfine)
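The conversion above relies on the scaled ppm value carrying a 16-bit binary fraction, so ppb = scaled_ppm * 1000 / 2^16; since 1000/65536 == 125/8192, the kernel computes (scaled_ppm * 125) >> 13, and this hunk merely widens the result from s32 to long (the caller's local variable follows). A standalone sketch of the arithmetic, reproduced for illustration only:

#include <stdio.h>

static long example_scaled_ppm_to_ppb(long scaled_ppm)
{
	long long ppb = 1 + scaled_ppm;	/* keep the kernel's +1 bias */

	ppb *= 125;
	ppb >>= 13;			/* *125 >> 13  ==  *1000 / 65536 */
	return (long)ppb;
}

int main(void)
{
	/* 1 ppm with a 16-bit fractional part is 65536 -> ~1000 ppb */
	printf("%ld\n", example_scaled_ppm_to_ppb(65536));
	/* 100 ppm -> ~100000 ppb */
	printf("%ld\n", example_scaled_ppm_to_ppb(100 * 65536));
	return 0;
}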
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 530e5f90095e..0d1034e3ed0f 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -324,7 +324,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!bp->base) {
dev_err(&pdev->dev, "io_remap bar0\n");
err = -ENOMEM;
- goto out;
+ goto out_release_regions;
}
bp->reg = bp->base + OCP_REGISTER_OFFSET;
bp->tod = bp->base + TOD_REGISTER_OFFSET;
@@ -347,6 +347,8 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out:
+ pci_iounmap(pdev, bp->base);
+out_release_regions:
pci_release_regions(pdev);
out_disable:
pci_disable_device(pdev);
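The fix above gives each acquired resource its own unwind point: a mapping failure now jumps straight to releasing the regions, while later failures first undo the ioremap. A small standalone sketch of that staged-unwind shape, with stub helpers standing in for the PCI calls (names are illustrative, not the driver's API):

#include <stdio.h>

static int  acquire_regions(void) { return 0; }
static void release_regions(void) { puts("release regions"); }
static int  map_bar(void)         { return 0; }
static void unmap_bar(void)       { puts("unmap bar"); }
static int  register_clock(void)  { return -1; }	/* simulate a late failure */

static int foo_probe(void)
{
	int err;

	err = acquire_regions();
	if (err)
		return err;

	err = map_bar();
	if (err)
		goto out_release_regions;	/* nothing mapped yet, skip unmap */

	err = register_clock();
	if (err)
		goto out;			/* undo everything acquired so far */

	return 0;

out:
	unmap_bar();
out_release_regions:
	release_regions();
	return err;
}

int main(void)
{
	printf("probe -> %d\n", foo_probe());
	return 0;
}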
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 50ec53d67a4c..db4c265287ae 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
return -ENODEV;
}
+ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
/*
* Allocate and register inbound messaging buffers to be ready
* to receive channel and system management requests
@@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
cm->rx_slots = RIOCM_RX_RING_SIZE;
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
- cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
- if (!cm->rx_wq) {
- riocm_error("failed to allocate IBMBOX_%d on %s",
- cmbox, mport->name);
- rio_release_outb_mbox(mport, cmbox);
- kfree(cm);
- return -ENOMEM;
- }
-
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9d84d9245490..24ce9a17ab4f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -193,20 +193,10 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
-config REGULATOR_BD70528
- tristate "ROHM BD70528 Power Regulator"
- depends on MFD_ROHM_BD70528
- help
- This driver supports voltage regulators on ROHM BD70528 PMIC.
- This will enable support for the software controllable buck
- and LDO regulators.
-
- This driver can also be built as a module. If so, the module
- will be called bd70528-regulator.
-
config REGULATOR_BD71815
tristate "ROHM BD71815 Power Regulator"
depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
help
This driver supports voltage regulators on ROHM BD71815 PMIC.
This will enable support for the software controllable buck
@@ -588,6 +578,14 @@ config REGULATOR_MAX8660
This driver controls a Maxim 8660/8661 voltage output
regulator via I2C bus.
+config REGULATOR_MAX8893
+ tristate "Maxim 8893 voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls a Maxim 8893 voltage output
+ regulator via I2C bus.
+
config REGULATOR_MAX8907
tristate "Maxim 8907 voltage regulator"
depends on MFD_MAX8907 || COMPILE_TEST
@@ -779,6 +777,15 @@ config REGULATOR_MT6358
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6359
+ tristate "MediaTek MT6359 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6359 PMIC.
+ This driver supports the control of different power rails of device
+ through regulator interface.
+
config REGULATOR_MT6360
tristate "MT6360 SubPMIC Regulator"
depends on MFD_MT6360
@@ -1030,8 +1037,28 @@ config REGULATOR_RT5033
RT5033 PMIC. The device supports multiple regulators like
current source, LDO and Buck.
+config REGULATOR_RT6160
+ tristate "Richtek RT6160 BuckBoost voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for the voltage regulator in the Richtek RT6160.
+ The device automatically switches its output between buck and boost
+ mode depending on the input source voltage. Its wide output range,
+ from 2025mV to 5200mV, covers most common application scenarios.
+
+config REGULATOR_RT6245
+ tristate "Richtek RT6245 voltage regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for the Richtek RT6245 voltage regulator.
+ It supports up to 14A of output current and an adjustable output
+ voltage from 0.4375V to 1.3875V in 12.5mV steps.
+
config REGULATOR_RTMV20
- tristate "RTMV20 Laser Diode Regulator"
+ tristate "Richtek RTMV20 Laser Diode Regulator"
depends on I2C
select REGMAP_I2C
help
@@ -1150,6 +1177,12 @@ config REGULATOR_STW481X_VMMC
This driver supports the internal VMMC regulator in the STw481x
PMIC chips.
+config REGULATOR_SY7636A
+ tristate "Silergy SY7636A voltage regulator"
+ depends on MFD_SY7636A
+ help
+ This driver supports the Silergy SY7636A voltage regulator.
+
config REGULATOR_SY8106A
tristate "Silergy SY8106A regulator"
depends on I2C && (OF || COMPILE_TEST)
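As a quick check of the RT6245 numbers quoted above: (1.3875 V - 0.4375 V) / 12.5 mV = 76 steps, i.e. 77 selectable voltages, assuming a linear selector-to-voltage mapping. A standalone sketch of that mapping (hypothetical names, not the driver's API):

#include <stdio.h>

#define RT6245_MIN_UV	437500
#define RT6245_STEP_UV	12500
#define RT6245_N_VOLT	77

static int rt6245_sel_to_uv(unsigned int sel)
{
	if (sel >= RT6245_N_VOLT)
		return -1;
	return RT6245_MIN_UV + sel * RT6245_STEP_UV;
}

int main(void)
{
	printf("first %d uV, last %d uV\n",
	       rt6245_sel_to_uv(0), rt6245_sel_to_uv(RT6245_N_VOLT - 1));
	return 0;
}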
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 580b015296ea..8c2f82206b94 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -4,7 +4,7 @@
#
-obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o irq_helpers.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
@@ -29,7 +29,6 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_ATC260X) += atc260x-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
-obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
obj-$(CONFIG_REGULATOR_BD71815) += bd71815-regulator.o
obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
@@ -72,6 +71,7 @@ obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
+obj-$(CONFIG_REGULATOR_MAX8893) += max8893.o
obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
@@ -94,6 +94,7 @@ obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6315) += mt6315-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
+obj-$(CONFIG_REGULATOR_MT6359) += mt6359-regulator.o
obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
@@ -124,6 +125,8 @@ obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
+obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
+obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
@@ -136,6 +139,7 @@ obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
+obj-$(CONFIG_REGULATOR_SY7636A) += sy7636a-regulator.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
obj-$(CONFIG_REGULATOR_SY8824X) += sy8824x.o
obj-$(CONFIG_REGULATOR_SY8827N) += sy8827n.o
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
index d8b429955d33..05147d2c3842 100644
--- a/drivers/regulator/atc260x-regulator.c
+++ b/drivers/regulator/atc260x-regulator.c
@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
- REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
};
static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
- REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+ REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
};
static const unsigned int atc260x_ldo_voltage_range_sel[] = {
- 0x0, 0x1,
+ 0x0, 0x20,
};
static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
.owner = THIS_MODULE, \
}
-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
.name = "LDO"#num, \
.supply_name = "ldo"#num, \
.of_match = of_match_ptr("ldo"#num), \
@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
.type = REGULATOR_VOLTAGE, \
.linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
.n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+ .n_voltages = n_volt, \
.vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
.vsel_mask = GENMASK(4, 1), \
.vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
atc2609a_reg_desc_ldo_bypass(0),
atc2609a_reg_desc_ldo_bypass(1),
atc2609a_reg_desc_ldo_bypass(2),
- atc2609a_reg_desc_ldo_range_pick(3, 0),
- atc2609a_reg_desc_ldo_range_pick(4, 0),
+ atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
+ atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
atc2609a_reg_desc_ldo(5),
- atc2609a_reg_desc_ldo_range_pick(6, 1),
- atc2609a_reg_desc_ldo_range_pick(7, 0),
- atc2609a_reg_desc_ldo_range_pick(8, 0),
+ atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
+ atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
+ atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
atc2609a_reg_desc_ldo_fixed(9),
};
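The corrected tables make each LDO range selector-relative: the 4-bit vsel field encodes 0..15 within a range, and the range itself is chosen through the separate range-select value (now 0x20 instead of 0x1). With that, range set 0 spans 0.7-2.2 V (16 steps) plus 2.1-3.3 V (13 steps), which is where the new .n_voltages = 29 comes from (28 for range set 1). A standalone sketch that just recounts those numbers, for illustration only:

#include <stdio.h>

struct lin_range { unsigned int min_uv, min_sel, max_sel, step_uv; };

/* mirror of the corrected atc2609a LDO range-0 table */
static const struct lin_range ldo_ranges0[] = {
	{  700000, 0, 15, 100000 },	/* 0.7 V .. 2.2 V */
	{ 2100000, 0, 12, 100000 },	/* 2.1 V .. 3.3 V */
};

int main(void)
{
	unsigned int i, n = 0;

	for (i = 0; i < 2; i++) {
		const struct lin_range *r = &ldo_ranges0[i];
		unsigned int cnt = r->max_sel - r->min_sel + 1;

		printf("range %u: %u..%u uV, %u voltages\n", i, r->min_uv,
		       r->min_uv + (r->max_sel - r->min_sel) * r->step_uv, cnt);
		n += cnt;
	}
	printf("total: %u (matches the new .n_voltages = 29)\n", n);
	return 0;
}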
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
deleted file mode 100644
index 1f5f9482b209..000000000000
--- a/drivers/regulator/bd70528-regulator.c
+++ /dev/null
@@ -1,283 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 ROHM Semiconductors
-// bd70528-regulator.c ROHM BD70528MWV regulator driver
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/rohm-bd70528.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/slab.h>
-
-#define BUCK_RAMPRATE_250MV 0
-#define BUCK_RAMPRATE_125MV 1
-#define BUCK_RAMP_MAX 250
-
-static const struct linear_range bd70528_buck1_volts[] = {
- REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000),
- REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000),
-};
-static const struct linear_range bd70528_buck2_volts[] = {
- REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000),
- REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000),
- REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000),
-};
-static const struct linear_range bd70528_buck3_volts[] = {
- REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000),
- REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0),
-};
-
-/* All LDOs have same voltage ranges */
-static const struct linear_range bd70528_ldo_volts[] = {
- REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000),
- REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000),
- REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000),
- REGULATOR_LINEAR_RANGE(3300000, 0x19, 0x1f, 0),
-};
-
-/* Also both LEDs support same voltages */
-static const unsigned int led_volts[] = {
- 20000, 30000
-};
-
-static int bd70528_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- if (ramp_delay > 0 && ramp_delay <= BUCK_RAMP_MAX) {
- unsigned int ramp_value = BUCK_RAMPRATE_250MV;
-
- if (ramp_delay <= 125)
- ramp_value = BUCK_RAMPRATE_125MV;
-
- return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
- BD70528_MASK_BUCK_RAMP,
- ramp_value << BD70528_SIFT_BUCK_RAMP);
- }
- dev_err(&rdev->dev, "%s: ramp_delay: %d not supported\n",
- rdev->desc->name, ramp_delay);
- return -EINVAL;
-}
-
-static int bd70528_led_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int sel)
-{
- int ret;
-
- ret = regulator_is_enabled_regmap(rdev);
- if (ret < 0)
- return ret;
-
- if (ret == 0)
- return regulator_set_voltage_sel_regmap(rdev, sel);
-
- dev_err(&rdev->dev,
- "LED voltage change not allowed when led is enabled\n");
-
- return -EBUSY;
-}
-
-static const struct regulator_ops bd70528_buck_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = bd70528_set_ramp_delay,
-};
-
-static const struct regulator_ops bd70528_ldo_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
-
-static const struct regulator_ops bd70528_led_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = bd70528_led_set_voltage_sel,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static const struct regulator_desc bd70528_desc[] = {
- {
- .name = "buck1",
- .of_match = of_match_ptr("BUCK1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_BUCK1,
- .ops = &bd70528_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_buck1_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_buck1_volts),
- .n_voltages = BD70528_BUCK_VOLTS,
- .enable_reg = BD70528_REG_BUCK1_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_BUCK1_VOLT,
- .vsel_mask = BD70528_MASK_BUCK_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck2",
- .of_match = of_match_ptr("BUCK2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_BUCK2,
- .ops = &bd70528_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_buck2_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_buck2_volts),
- .n_voltages = BD70528_BUCK_VOLTS,
- .enable_reg = BD70528_REG_BUCK2_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_BUCK2_VOLT,
- .vsel_mask = BD70528_MASK_BUCK_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck3",
- .of_match = of_match_ptr("BUCK3"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_BUCK3,
- .ops = &bd70528_buck_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_buck3_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_buck3_volts),
- .n_voltages = BD70528_BUCK_VOLTS,
- .enable_reg = BD70528_REG_BUCK3_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_BUCK3_VOLT,
- .vsel_mask = BD70528_MASK_BUCK_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo1",
- .of_match = of_match_ptr("LDO1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LDO1,
- .ops = &bd70528_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_ldo_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
- .n_voltages = BD70528_LDO_VOLTS,
- .enable_reg = BD70528_REG_LDO1_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_LDO1_VOLT,
- .vsel_mask = BD70528_MASK_LDO_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo2",
- .of_match = of_match_ptr("LDO2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LDO2,
- .ops = &bd70528_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_ldo_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
- .n_voltages = BD70528_LDO_VOLTS,
- .enable_reg = BD70528_REG_LDO2_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_LDO2_VOLT,
- .vsel_mask = BD70528_MASK_LDO_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo3",
- .of_match = of_match_ptr("LDO3"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LDO3,
- .ops = &bd70528_ldo_ops,
- .type = REGULATOR_VOLTAGE,
- .linear_ranges = bd70528_ldo_volts,
- .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
- .n_voltages = BD70528_LDO_VOLTS,
- .enable_reg = BD70528_REG_LDO3_EN,
- .enable_mask = BD70528_MASK_RUN_EN,
- .vsel_reg = BD70528_REG_LDO3_VOLT,
- .vsel_mask = BD70528_MASK_LDO_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo_led1",
- .of_match = of_match_ptr("LDO_LED1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LED1,
- .ops = &bd70528_led_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &led_volts[0],
- .n_voltages = ARRAY_SIZE(led_volts),
- .enable_reg = BD70528_REG_LED_EN,
- .enable_mask = BD70528_MASK_LED1_EN,
- .vsel_reg = BD70528_REG_LED_VOLT,
- .vsel_mask = BD70528_MASK_LED1_VOLT,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo_led2",
- .of_match = of_match_ptr("LDO_LED2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD70528_LED2,
- .ops = &bd70528_led_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &led_volts[0],
- .n_voltages = ARRAY_SIZE(led_volts),
- .enable_reg = BD70528_REG_LED_EN,
- .enable_mask = BD70528_MASK_LED2_EN,
- .vsel_reg = BD70528_REG_LED_VOLT,
- .vsel_mask = BD70528_MASK_LED2_VOLT,
- .owner = THIS_MODULE,
- },
-
-};
-
-static int bd70528_probe(struct platform_device *pdev)
-{
- int i;
- struct regulator_config config = {
- .dev = pdev->dev.parent,
- };
-
- config.regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!config.regmap)
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) {
- struct regulator_dev *rdev;
-
- rdev = devm_regulator_register(&pdev->dev, &bd70528_desc[i],
- &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev,
- "failed to register %s regulator\n",
- bd70528_desc[i].name);
- return PTR_ERR(rdev);
- }
- }
- return 0;
-}
-
-static struct platform_driver bd70528_regulator = {
- .driver = {
- .name = "bd70528-pmic"
- },
- .probe = bd70528_probe,
-};
-
-module_platform_driver(bd70528_regulator);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD70528 voltage regulator driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bd70528-pmic");
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index a4e8d5e36b40..16edd9062ca9 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -13,6 +13,8 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -26,14 +28,6 @@ struct bd71815_regulator {
const struct rohm_dvs_config *dvs;
};
-struct bd71815_pmic {
- struct bd71815_regulator descs[BD71815_REGULATOR_CNT];
- struct regmap *regmap;
- struct device *dev;
- struct gpio_descs *gps;
- struct regulator_dev *rdev[BD71815_REGULATOR_CNT];
-};
-
static const int bd7181x_wled_currents[] = {
10, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000, 2000, 3000, 4000,
5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 15000,
@@ -300,14 +294,13 @@ static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
{
- struct bd71815_pmic *pmic = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
int ret, regh, regl, val;
regh = BD71815_REG_BUCK1_VOLT_H + rid * 0x2;
regl = BD71815_REG_BUCK1_VOLT_L + rid * 0x2;
- ret = regmap_read(pmic->regmap, regh, &val);
+ ret = regmap_read(rdev->regmap, regh, &val);
if (ret)
return ret;
@@ -319,7 +312,7 @@ static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
* by BD71815_BUCK_DVSSEL bit
*/
if ((!(val & BD71815_BUCK_STBY_DVS)) && (!(val & BD71815_BUCK_DVSSEL)))
- ret = regmap_read(pmic->regmap, regl, &val);
+ ret = regmap_read(rdev->regmap, regl, &val);
if (ret)
return ret;
@@ -333,14 +326,13 @@ static int bd7181x_buck12_get_voltage_sel(struct regulator_dev *rdev)
static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
unsigned int sel)
{
- struct bd71815_pmic *pmic = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
int ret, val, reg, regh, regl;
regh = BD71815_REG_BUCK1_VOLT_H + rid*0x2;
regl = BD71815_REG_BUCK1_VOLT_L + rid*0x2;
- ret = regmap_read(pmic->regmap, regh, &val);
+ ret = regmap_read(rdev->regmap, regh, &val);
if (ret)
return ret;
@@ -350,7 +342,7 @@ static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
* voltages at runtime is not supported by this driver.
*/
if (((val & BD71815_BUCK_STBY_DVS))) {
- return regmap_update_bits(pmic->regmap, regh, BD71815_VOLT_MASK,
+ return regmap_update_bits(rdev->regmap, regh, BD71815_VOLT_MASK,
sel);
}
/* Update new voltage to the register which is not selected now */
@@ -359,12 +351,13 @@ static int bd7181x_buck12_set_voltage_sel(struct regulator_dev *rdev,
else
reg = regh;
- ret = regmap_update_bits(pmic->regmap, reg, BD71815_VOLT_MASK, sel);
+ ret = regmap_update_bits(rdev->regmap, reg, BD71815_VOLT_MASK, sel);
if (ret)
return ret;
/* Select the other DVS register to be used */
- return regmap_update_bits(pmic->regmap, regh, BD71815_BUCK_DVSSEL, ~val);
+ return regmap_update_bits(rdev->regmap, regh, BD71815_BUCK_DVSSEL,
+ ~val);
}
static const struct regulator_ops bd7181x_ldo_regulator_ops = {
@@ -522,7 +515,7 @@ static const struct regulator_ops bd7181x_led_regulator_ops = {
.dvs = (_dvs), \
}
-static struct bd71815_regulator bd71815_regulators[] = {
+static const struct bd71815_regulator bd71815_regulators[] = {
BD71815_BUCK12_REG(buck1, BD71815_BUCK1, BD71815_REG_BUCK1_VOLT_H,
BD71815_REG_BUCK1_MODE, 800000, 2000000, 25000,
&buck1_dvs),
@@ -568,24 +561,16 @@ static struct bd71815_regulator bd71815_regulators[] = {
static int bd7181x_probe(struct platform_device *pdev)
{
- struct bd71815_pmic *pmic;
struct regulator_config config = {};
int i, ret;
struct gpio_desc *ldo4_en;
+ struct regmap *regmap;
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- memcpy(pmic->descs, bd71815_regulators, sizeof(pmic->descs));
-
- pmic->dev = &pdev->dev;
- pmic->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!pmic->regmap) {
- dev_err(pmic->dev, "No parent regmap\n");
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "No parent regmap\n");
return -ENODEV;
}
- platform_set_drvdata(pdev, pmic);
ldo4_en = devm_gpiod_get_from_of_node(&pdev->dev,
pdev->dev.parent->of_node,
"rohm,vsel-gpios", 0,
@@ -599,23 +584,23 @@ static int bd7181x_probe(struct platform_device *pdev)
}
/* Disable to go to ship-mode */
- ret = regmap_update_bits(pmic->regmap, BD71815_REG_PWRCTRL,
- RESTARTEN, 0);
+ ret = regmap_update_bits(regmap, BD71815_REG_PWRCTRL, RESTARTEN, 0);
if (ret)
return ret;
config.dev = pdev->dev.parent;
- config.regmap = pmic->regmap;
+ config.regmap = regmap;
for (i = 0; i < BD71815_REGULATOR_CNT; i++) {
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
struct regulator_dev *rdev;
- desc = &pmic->descs[i].desc;
+ desc = &bd71815_regulators[i].desc;
+
if (i == BD71815_LDO4)
config.ena_gpiod = ldo4_en;
-
- config.driver_data = pmic;
+ else
+ config.ena_gpiod = NULL;
rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
@@ -624,8 +609,6 @@ static int bd7181x_probe(struct platform_device *pdev)
desc->name);
return PTR_ERR(rdev);
}
- config.ena_gpiod = NULL;
- pmic->rdev[i] = rdev;
}
return 0;
}
@@ -639,7 +622,6 @@ MODULE_DEVICE_TABLE(platform, bd7181x_pmic_id);
static struct platform_driver bd7181x_regulator = {
.driver = {
.name = "bd7181x-pmic",
- .owner = THIS_MODULE,
},
.probe = bd7181x_probe,
.id_table = bd7181x_pmic_id,
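The buck1/2 callbacks above keep using the two voltage registers (VOLT_H/VOLT_L): when the hardware is not driving DVS itself, the new selector is written into whichever register is currently inactive and the BD71815_BUCK_DVSSEL bit is then flipped, so the output switches to the freshly written value in one step. A toy, userspace-only model of that ping-pong update (registers are plain variables; names are illustrative):

#include <stdio.h>

static unsigned int reg_h, reg_l;	/* stand-ins for VOLT_H / VOLT_L   */
static unsigned int dvssel = 1;		/* 1: H register drives the output */

static void set_voltage_sel(unsigned int sel)
{
	if (dvssel)
		reg_l = sel;	/* L is inactive, safe to rewrite       */
	else
		reg_h = sel;

	dvssel = !dvssel;	/* flip the select bit to the new value */
}

int main(void)
{
	set_voltage_sel(0x10);
	printf("H=%#x L=%#x sel=%u\n", reg_h, reg_l, dvssel);
	set_voltage_sel(0x20);
	printf("H=%#x L=%#x sel=%u\n", reg_h, reg_l, dvssel);
	return 0;
}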
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e61295b30503..b1eb46961993 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -334,7 +334,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
NULL);
BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
- regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+ regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
NULL);
/*
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
index 204a2da054f5..e16c3727db7a 100644
--- a/drivers/regulator/bd9576-regulator.c
+++ b/drivers/regulator/bd9576-regulator.c
@@ -2,10 +2,10 @@
// Copyright (C) 2020 ROHM Semiconductors
// ROHM BD9576MUF/BD9573MUF regulator driver
-#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mfd/rohm-bd957x.h>
#include <linux/mfd/rohm-generic.h>
@@ -16,29 +16,118 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#define BD957X_VOUTS1_VOLT 3300000
#define BD957X_VOUTS4_BASE_VOLT 1030000
#define BD957X_VOUTS34_NUM_VOLT 32
-static int vout1_volt_table[] = {5000000, 4900000, 4800000, 4700000, 4600000,
- 4500000, 4500000, 4500000, 5000000, 5100000,
- 5200000, 5300000, 5400000, 5500000, 5500000,
- 5500000};
+#define BD9576_THERM_IRQ_MASK_TW BIT(5)
+#define BD9576_xVD_IRQ_MASK_VOUTL1 BIT(5)
+#define BD9576_UVD_IRQ_MASK_VOUTS1_OCW BIT(6)
+#define BD9576_xVD_IRQ_MASK_VOUT1TO4 0x0F
-static int vout2_volt_table[] = {1800000, 1780000, 1760000, 1740000, 1720000,
- 1700000, 1680000, 1660000, 1800000, 1820000,
- 1840000, 1860000, 1880000, 1900000, 1920000,
- 1940000};
+static const unsigned int vout1_volt_table[] = {
+ 5000000, 4900000, 4800000, 4700000, 4600000,
+ 4500000, 4500000, 4500000, 5000000, 5100000,
+ 5200000, 5300000, 5400000, 5500000, 5500000,
+ 5500000
+};
+
+static const unsigned int vout2_volt_table[] = {
+ 1800000, 1780000, 1760000, 1740000, 1720000,
+ 1700000, 1680000, 1660000, 1800000, 1820000,
+ 1840000, 1860000, 1880000, 1900000, 1920000,
+ 1940000
+};
+
+static const unsigned int voutl1_volt_table[] = {
+ 2500000, 2540000, 2580000, 2620000, 2660000,
+ 2700000, 2740000, 2780000, 2500000, 2460000,
+ 2420000, 2380000, 2340000, 2300000, 2260000,
+ 2220000
+};
+
+static const struct linear_range vout1_xvd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(225000, 0x01, 0x2b, 0),
+ REGULATOR_LINEAR_RANGE(225000, 0x2c, 0x54, 5000),
+ REGULATOR_LINEAR_RANGE(425000, 0x55, 0x7f, 0),
+};
+
+static const struct linear_range vout234_xvd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(17000, 0x01, 0x0f, 0),
+ REGULATOR_LINEAR_RANGE(17000, 0x10, 0x6d, 1000),
+ REGULATOR_LINEAR_RANGE(110000, 0x6e, 0x7f, 0),
+};
+
+static const struct linear_range voutL1_xvd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(34000, 0x01, 0x0f, 0),
+ REGULATOR_LINEAR_RANGE(34000, 0x10, 0x6d, 2000),
+ REGULATOR_LINEAR_RANGE(220000, 0x6e, 0x7f, 0),
+};
+
+static struct linear_range voutS1_ocw_ranges_internal[] = {
+ REGULATOR_LINEAR_RANGE(200000, 0x01, 0x04, 0),
+ REGULATOR_LINEAR_RANGE(250000, 0x05, 0x18, 50000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x19, 0x3f, 0),
+};
+
+static struct linear_range voutS1_ocw_ranges[] = {
+ REGULATOR_LINEAR_RANGE(50000, 0x01, 0x04, 0),
+ REGULATOR_LINEAR_RANGE(60000, 0x05, 0x18, 10000),
+ REGULATOR_LINEAR_RANGE(250000, 0x19, 0x3f, 0),
+};
+
+static struct linear_range voutS1_ocp_ranges_internal[] = {
+ REGULATOR_LINEAR_RANGE(300000, 0x01, 0x06, 0),
+ REGULATOR_LINEAR_RANGE(350000, 0x7, 0x1b, 50000),
+ REGULATOR_LINEAR_RANGE(1350000, 0x1c, 0x3f, 0),
+};
-static int voutl1_volt_table[] = {2500000, 2540000, 2580000, 2620000, 2660000,
- 2700000, 2740000, 2780000, 2500000, 2460000,
- 2420000, 2380000, 2340000, 2300000, 2260000,
- 2220000};
+static struct linear_range voutS1_ocp_ranges[] = {
+ REGULATOR_LINEAR_RANGE(70000, 0x01, 0x06, 0),
+ REGULATOR_LINEAR_RANGE(80000, 0x7, 0x1b, 10000),
+ REGULATOR_LINEAR_RANGE(280000, 0x1c, 0x3f, 0),
+};
struct bd957x_regulator_data {
struct regulator_desc desc;
int base_voltage;
+ struct regulator_dev *rdev;
+ int ovd_notif;
+ int uvd_notif;
+ int temp_notif;
+ int ovd_err;
+ int uvd_err;
+ int temp_err;
+ const struct linear_range *xvd_ranges;
+ int num_xvd_ranges;
+ bool oc_supported;
+ unsigned int ovd_reg;
+ unsigned int uvd_reg;
+ unsigned int xvd_mask;
+ unsigned int ocp_reg;
+ unsigned int ocp_mask;
+ unsigned int ocw_reg;
+ unsigned int ocw_mask;
+ unsigned int ocw_rfet;
+};
+
+#define BD9576_NUM_REGULATORS 6
+#define BD9576_NUM_OVD_REGULATORS 5
+
+struct bd957x_data {
+ struct bd957x_regulator_data regulator_data[BD9576_NUM_REGULATORS];
+ struct regmap *regmap;
+ struct delayed_work therm_irq_suppress;
+ struct delayed_work ovd_irq_suppress;
+ struct delayed_work uvd_irq_suppress;
+ unsigned int therm_irq;
+ unsigned int ovd_irq;
+ unsigned int uvd_irq;
+ spinlock_t err_lock;
+ int regulator_global_err;
};
static int bd957x_vout34_list_voltage(struct regulator_dev *rdev,
@@ -72,151 +161,784 @@ static int bd957x_list_voltage(struct regulator_dev *rdev,
return desc->volt_table[index];
}
-static const struct regulator_ops bd957x_vout34_ops = {
+static void bd9576_fill_ovd_flags(struct bd957x_regulator_data *data,
+ bool warn)
+{
+ if (warn) {
+ data->ovd_notif = REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ data->ovd_err = REGULATOR_ERROR_OVER_VOLTAGE_WARN;
+ } else {
+ data->ovd_notif = REGULATOR_EVENT_REGULATION_OUT;
+ data->ovd_err = REGULATOR_ERROR_REGULATION_OUT;
+ }
+}
+
+static void bd9576_fill_ocp_flags(struct bd957x_regulator_data *data,
+ bool warn)
+{
+ if (warn) {
+ data->uvd_notif = REGULATOR_EVENT_OVER_CURRENT_WARN;
+ data->uvd_err = REGULATOR_ERROR_OVER_CURRENT_WARN;
+ } else {
+ data->uvd_notif = REGULATOR_EVENT_OVER_CURRENT;
+ data->uvd_err = REGULATOR_ERROR_OVER_CURRENT;
+ }
+}
+
+static void bd9576_fill_uvd_flags(struct bd957x_regulator_data *data,
+ bool warn)
+{
+ if (warn) {
+ data->uvd_notif = REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ data->uvd_err = REGULATOR_ERROR_UNDER_VOLTAGE_WARN;
+ } else {
+ data->uvd_notif = REGULATOR_EVENT_UNDER_VOLTAGE;
+ data->uvd_err = REGULATOR_ERROR_UNDER_VOLTAGE;
+ }
+}
+
+static void bd9576_fill_temp_flags(struct bd957x_regulator_data *data,
+ bool enable, bool warn)
+{
+ if (!enable) {
+ data->temp_notif = 0;
+ data->temp_err = 0;
+ } else if (warn) {
+ data->temp_notif = REGULATOR_EVENT_OVER_TEMP_WARN;
+ data->temp_err = REGULATOR_ERROR_OVER_TEMP_WARN;
+ } else {
+ data->temp_notif = REGULATOR_EVENT_OVER_TEMP;
+ data->temp_err = REGULATOR_ERROR_OVER_TEMP;
+ }
+}
+
+static int bd9576_set_limit(const struct linear_range *r, int num_ranges,
+ struct regmap *regmap, int reg, int mask, int lim)
+{
+ int ret;
+ bool found;
+ int sel = 0;
+
+ if (lim) {
+
+ ret = linear_range_get_selector_low_array(r, num_ranges,
+ lim, &sel, &found);
+ if (ret)
+ return ret;
+
+ if (!found)
+ dev_warn(regmap_get_device(regmap),
+ "limit %d out of range. Setting lower\n",
+ lim);
+ }
+
+ return regmap_update_bits(regmap, reg, mask, sel);
+}
+
+static bool check_ocp_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->uvd_notif != REGULATOR_EVENT_OVER_CURRENT) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->uvd_notif != REGULATOR_EVENT_OVER_CURRENT_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both OCP WARN and ERR\n");
+ /* Do not overwrite ERR config with WARN */
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+
+ bd9576_fill_ocp_flags(r, 0);
+ }
+
+ return false;
+}
+
+static bool check_uvd_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->uvd_notif != REGULATOR_EVENT_UNDER_VOLTAGE) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->uvd_notif != REGULATOR_EVENT_UNDER_VOLTAGE_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both UVD WARN and ERR\n");
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+
+ bd9576_fill_uvd_flags(r, 0);
+ }
+
+ return false;
+}
+
+static bool check_ovd_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->ovd_notif != REGULATOR_EVENT_REGULATION_OUT) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->ovd_notif != REGULATOR_EVENT_OVER_VOLTAGE_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both OVD WARN and ERR\n");
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+
+ bd9576_fill_ovd_flags(r, 0);
+ }
+
+ return false;
+}
+
+static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity,
+ struct bd957x_regulator_data *r)
+{
+ if ((severity == REGULATOR_SEVERITY_ERR &&
+ r->ovd_notif != REGULATOR_EVENT_OVER_TEMP) ||
+ (severity == REGULATOR_SEVERITY_WARN &&
+ r->ovd_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't support both thermal WARN and ERR\n");
+ if (severity == REGULATOR_SEVERITY_WARN)
+ return true;
+ }
+
+ return false;
+}
+
+static int bd9576_set_ocp(struct regulator_dev *rdev, int lim_uA, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int reg, mask;
+ int Vfet, rfet;
+ const struct linear_range *range;
+ int num_ranges;
+
+ if ((lim_uA && !enable) || (!lim_uA && enable))
+ return -EINVAL;
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ if (!r->oc_supported)
+ return -EINVAL;
+
+ d = rdev_get_drvdata(rdev);
+
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ reg = r->ocp_reg;
+ mask = r->ocp_mask;
+ if (r->ocw_rfet) {
+ range = voutS1_ocp_ranges;
+ num_ranges = ARRAY_SIZE(voutS1_ocp_ranges);
+ rfet = r->ocw_rfet / 1000;
+ } else {
+ range = voutS1_ocp_ranges_internal;
+ num_ranges = ARRAY_SIZE(voutS1_ocp_ranges_internal);
+ /* Internal values are already micro-amperes */
+ rfet = 1000;
+ }
+ } else {
+ reg = r->ocw_reg;
+ mask = r->ocw_mask;
+
+ if (r->ocw_rfet) {
+ range = voutS1_ocw_ranges;
+ num_ranges = ARRAY_SIZE(voutS1_ocw_ranges);
+ rfet = r->ocw_rfet / 1000;
+ } else {
+ range = voutS1_ocw_ranges_internal;
+ num_ranges = ARRAY_SIZE(voutS1_ocw_ranges_internal);
+ /* Internal values are already micro-amperes */
+ rfet = 1000;
+ }
+
+ /* We abuse uvd fields for OCW on VoutS1 */
+ if (r->uvd_notif) {
+ /*
+ * If both warning and error are requested, prioritize
+ * ERROR configuration
+ */
+ if (check_ocp_flag_mismatch(rdev, severity, r))
+ return 0;
+ } else {
+ bool warn = severity == REGULATOR_SEVERITY_WARN;
+
+ bd9576_fill_ocp_flags(r, warn);
+ }
+ }
+
+ /*
+ * The limit is given in uA and rfet in mOhm. Dividing lim_uA by 1000
+ * yields mA, and mA * mOhm gives Vfet in uV.
+ * (Both the limit and Rfet are expected to be in the magnitude of
+ * hundreds of milliamperes / milliohms, so accuracy stays decent.)
+ */
+ Vfet = lim_uA/1000 * rfet;
+
+ return bd9576_set_limit(range, num_ranges, d->regmap,
+ reg, mask, Vfet);
+}
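A worked example of the unit handling above, with illustrative numbers: rohm,ocw-fet-ron-micro-ohms = 20000 gives rfet = 20 mOhm, so a requested 3 A warning limit (lim_uA = 3000000) becomes Vfet = 3000000 / 1000 * 20 = 60000 uV, which the OCW range table would map to selector 0x05. A trivial standalone check of that conversion:

#include <stdio.h>

int main(void)
{
	int lim_ua = 3000000;	/* requested 3 A limit        */
	int rfet_mohm = 20;	/* 20000 micro-ohm FET / 1000 */
	int vfet_uv = lim_ua / 1000 * rfet_mohm;

	printf("Vfet = %d uV\n", vfet_uv);	/* prints 60000 */
	return 0;
}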
+
+static int bd9576_set_uvp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int mask, reg;
+
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (!enable || lim_uV)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * The BD9576 uses a special value in the limit register for enable
+ * control, so we can't set a limit while keeping the feature disabled,
+ * nor enable the feature without a given limit.
+ */
+ if ((lim_uV && !enable) || (!lim_uV && enable))
+ return -EINVAL;
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ d = rdev_get_drvdata(rdev);
+
+ mask = r->xvd_mask;
+ reg = r->uvd_reg;
+ /*
+ * Check that there is no mismatch for what the detection IRQs are to
+ * be used.
+ */
+ if (r->uvd_notif) {
+ if (check_uvd_flag_mismatch(rdev, severity, r))
+ return 0;
+ } else {
+ bd9576_fill_uvd_flags(r, severity == REGULATOR_SEVERITY_WARN);
+ }
+
+ return bd9576_set_limit(r->xvd_ranges, r->num_xvd_ranges, d->regmap,
+ reg, mask, lim_uV);
+}
+
+static int bd9576_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int mask, reg;
+
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (!enable || lim_uV)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * The BD9576 uses a special value in the limit register for enable
+ * control, so we can't set a limit while keeping the feature disabled,
+ * nor enable the feature without a given limit.
+ */
+ if ((lim_uV && !enable) || (!lim_uV && enable))
+ return -EINVAL;
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ d = rdev_get_drvdata(rdev);
+
+ mask = r->xvd_mask;
+ reg = r->ovd_reg;
+ /*
+ * Check that there is no mismatch for what the detection IRQs are to
+ * be used.
+ */
+ if (r->ovd_notif) {
+ if (check_ovd_flag_mismatch(rdev, severity, r))
+ return 0;
+ } else {
+ bd9576_fill_ovd_flags(r, severity == REGULATOR_SEVERITY_WARN);
+ }
+
+ return bd9576_set_limit(r->xvd_ranges, r->num_xvd_ranges, d->regmap,
+ reg, mask, lim_uV);
+}
+
+
+static int bd9576_set_tw(struct regulator_dev *rdev, int lim, int severity,
+ bool enable)
+{
+ struct bd957x_data *d;
+ struct bd957x_regulator_data *r;
+ int i;
+
+ /*
+ * BD9576MUF has fixed temperature limits
+ * The detection can only be enabled/disabled
+ */
+ if (lim)
+ return -EINVAL;
+
+ /* Protection can't be disabled */
+ if (severity == REGULATOR_SEVERITY_PROT) {
+ if (!enable)
+ return -EINVAL;
+ else
+ return 0;
+ }
+
+ r = container_of(rdev->desc, struct bd957x_regulator_data, desc);
+ d = rdev_get_drvdata(rdev);
+
+ /*
+ * Check that there is no mismatch for what the detection IRQs are to
+ * be used.
+ */
+ if (r->temp_notif)
+ if (check_temp_flag_mismatch(rdev, severity, r))
+ return 0;
+
+ bd9576_fill_temp_flags(r, enable, severity == REGULATOR_SEVERITY_WARN);
+
+ if (enable)
+ return regmap_update_bits(d->regmap, BD957X_REG_INT_THERM_MASK,
+ BD9576_THERM_IRQ_MASK_TW, 0);
+
+ /*
+ * If any of the regulators is interested in the thermal warning we
+ * keep the IRQ enabled.
+ */
+ for (i = 0; i < BD9576_NUM_REGULATORS; i++)
+ if (d->regulator_data[i].temp_notif)
+ return 0;
+
+ return regmap_update_bits(d->regmap, BD957X_REG_INT_THERM_MASK,
+ BD9576_THERM_IRQ_MASK_TW,
+ BD9576_THERM_IRQ_MASK_TW);
+}
+
+static const struct regulator_ops bd9573_vout34_ops = {
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = bd957x_vout34_list_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static const struct regulator_ops bd957X_vouts1_regulator_ops = {
+static const struct regulator_ops bd9576_vout34_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd957x_vout34_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_over_voltage_protection = bd9576_set_ovp,
+ .set_under_voltage_protection = bd9576_set_uvp,
+ .set_thermal_protection = bd9576_set_tw,
+};
+
+static const struct regulator_ops bd9573_vouts1_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops bd9576_vouts1_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
+ .set_over_current_protection = bd9576_set_ocp,
};
-static const struct regulator_ops bd957x_ops = {
+static const struct regulator_ops bd9573_ops = {
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = bd957x_list_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct bd957x_regulator_data bd9576_regulators[] = {
- {
- .desc = {
- .name = "VD50",
- .of_match = of_match_ptr("regulator-vd50"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VD50,
- .type = REGULATOR_VOLTAGE,
- .ops = &bd957x_ops,
- .volt_table = &vout1_volt_table[0],
- .n_voltages = ARRAY_SIZE(vout1_volt_table),
- .vsel_reg = BD957X_REG_VOUT1_TUNE,
- .vsel_mask = BD957X_MASK_VOUT1_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER1,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+static const struct regulator_ops bd9576_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd957x_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_over_voltage_protection = bd9576_set_ovp,
+ .set_under_voltage_protection = bd9576_set_uvp,
+ .set_thermal_protection = bd9576_set_tw,
+};
+
+static const struct regulator_ops *bd9573_ops_arr[] = {
+ [BD957X_VD50] = &bd9573_ops,
+ [BD957X_VD18] = &bd9573_ops,
+ [BD957X_VDDDR] = &bd9573_vout34_ops,
+ [BD957X_VD10] = &bd9573_vout34_ops,
+ [BD957X_VOUTL1] = &bd9573_ops,
+ [BD957X_VOUTS1] = &bd9573_vouts1_regulator_ops,
+};
+
+static const struct regulator_ops *bd9576_ops_arr[] = {
+ [BD957X_VD50] = &bd9576_ops,
+ [BD957X_VD18] = &bd9576_ops,
+ [BD957X_VDDDR] = &bd9576_vout34_ops,
+ [BD957X_VD10] = &bd9576_vout34_ops,
+ [BD957X_VOUTL1] = &bd9576_ops,
+ [BD957X_VOUTS1] = &bd9576_vouts1_regulator_ops,
+};
+
+static int vouts1_get_fet_res(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd957x_regulator_data *data;
+ int ret;
+ u32 uohms;
+
+ data = container_of(desc, struct bd957x_regulator_data, desc);
+
+ ret = of_property_read_u32(np, "rohm,ocw-fet-ron-micro-ohms", &uohms);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+
+ return 0;
+ }
+ data->ocw_rfet = uohms;
+ return 0;
+}
+
+static struct bd957x_data bd957x_regulators = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "VD50",
+ .of_match = of_match_ptr("regulator-vd50"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD50,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &vout1_volt_table[0],
+ .n_voltages = ARRAY_SIZE(vout1_volt_table),
+ .vsel_reg = BD957X_REG_VOUT1_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT1_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = vout1_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout1_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUT1_OVD,
+ .uvd_reg = BD9576_REG_VOUT1_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VD18",
- .of_match = of_match_ptr("regulator-vd18"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VD18,
- .type = REGULATOR_VOLTAGE,
- .ops = &bd957x_ops,
- .volt_table = &vout2_volt_table[0],
- .n_voltages = ARRAY_SIZE(vout2_volt_table),
- .vsel_reg = BD957X_REG_VOUT2_TUNE,
- .vsel_mask = BD957X_MASK_VOUT2_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER2,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VD18",
+ .of_match = of_match_ptr("regulator-vd18"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD18,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &vout2_volt_table[0],
+ .n_voltages = ARRAY_SIZE(vout2_volt_table),
+ .vsel_reg = BD957X_REG_VOUT2_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT2_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER2,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = vout234_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout234_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUT2_OVD,
+ .uvd_reg = BD9576_REG_VOUT2_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VDDDR",
- .of_match = of_match_ptr("regulator-vdddr"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VDDDR,
- .ops = &bd957x_vout34_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD957X_VOUTS34_NUM_VOLT,
- .vsel_reg = BD957X_REG_VOUT3_TUNE,
- .vsel_mask = BD957X_MASK_VOUT3_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER3,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VDDDR",
+ .of_match = of_match_ptr("regulator-vdddr"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VDDDR,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD957X_VOUTS34_NUM_VOLT,
+ .vsel_reg = BD957X_REG_VOUT3_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT3_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER3,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .ovd_reg = BD9576_REG_VOUT3_OVD,
+ .uvd_reg = BD9576_REG_VOUT3_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
+ .xvd_ranges = vout234_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout234_xvd_ranges),
},
- },
- {
- .desc = {
- .name = "VD10",
- .of_match = of_match_ptr("regulator-vd10"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VD10,
- .ops = &bd957x_vout34_ops,
- .type = REGULATOR_VOLTAGE,
- .fixed_uV = BD957X_VOUTS4_BASE_VOLT,
- .n_voltages = BD957X_VOUTS34_NUM_VOLT,
- .vsel_reg = BD957X_REG_VOUT4_TUNE,
- .vsel_mask = BD957X_MASK_VOUT4_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGER4,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VD10",
+ .of_match = of_match_ptr("regulator-vd10"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD10,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD957X_VOUTS4_BASE_VOLT,
+ .n_voltages = BD957X_VOUTS34_NUM_VOLT,
+ .vsel_reg = BD957X_REG_VOUT4_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT4_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER4,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = vout234_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(vout234_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUT4_OVD,
+ .uvd_reg = BD9576_REG_VOUT4_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VOUTL1",
- .of_match = of_match_ptr("regulator-voutl1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VOUTL1,
- .ops = &bd957x_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &voutl1_volt_table[0],
- .n_voltages = ARRAY_SIZE(voutl1_volt_table),
- .vsel_reg = BD957X_REG_VOUTL1_TUNE,
- .vsel_mask = BD957X_MASK_VOUTL1_TUNE,
- .enable_reg = BD957X_REG_POW_TRIGGERL1,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VOUTL1",
+ .of_match = of_match_ptr("regulator-voutl1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VOUTL1,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &voutl1_volt_table[0],
+ .n_voltages = ARRAY_SIZE(voutl1_volt_table),
+ .vsel_reg = BD957X_REG_VOUTL1_TUNE,
+ .vsel_mask = BD957X_MASK_VOUTL1_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGERL1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ .xvd_ranges = voutL1_xvd_ranges,
+ .num_xvd_ranges = ARRAY_SIZE(voutL1_xvd_ranges),
+ .ovd_reg = BD9576_REG_VOUTL1_OVD,
+ .uvd_reg = BD9576_REG_VOUTL1_UVD,
+ .xvd_mask = BD9576_MASK_XVD,
},
- },
- {
- .desc = {
- .name = "VOUTS1",
- .of_match = of_match_ptr("regulator-vouts1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD957X_VOUTS1,
- .ops = &bd957X_vouts1_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 1,
- .fixed_uV = BD957X_VOUTS1_VOLT,
- .enable_reg = BD957X_REG_POW_TRIGGERS1,
- .enable_mask = BD957X_REGULATOR_EN_MASK,
- .enable_val = BD957X_REGULATOR_DIS_VAL,
- .enable_is_inverted = true,
- .owner = THIS_MODULE,
+ {
+ .desc = {
+ .name = "VOUTS1",
+ .of_match = of_match_ptr("regulator-vouts1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VOUTS1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1,
+ .fixed_uV = BD957X_VOUTS1_VOLT,
+ .enable_reg = BD957X_REG_POW_TRIGGERS1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ .of_parse_cb = vouts1_get_fet_res,
+ },
+ .oc_supported = true,
+ .ocw_reg = BD9576_REG_VOUT1S_OCW,
+ .ocw_mask = BD9576_MASK_VOUT1S_OCW,
+ .ocp_reg = BD9576_REG_VOUT1S_OCP,
+ .ocp_mask = BD9576_MASK_VOUT1S_OCP,
},
},
};
+static int bd9576_renable(struct regulator_irq_data *rid, int reg, int mask)
+{
+ int val, ret;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, reg, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ if (rid->opaque && rid->opaque == (val & mask)) {
+ /*
+		 * It seems we still have the same status. Ack and return
+		 * information that we are still out of limits and the core
+		 * should not re-enable the IRQ
+ */
+ regmap_write(d->regmap, reg, mask & val);
+ return REGULATOR_ERROR_ON;
+ }
+ rid->opaque = 0;
+ /*
+	 * The status has changed. Either the problem was solved or we have
+	 * new issues. Let's re-enable the IRQ and be prepared to report
+	 * problems again
+ */
+ return REGULATOR_ERROR_CLEARED;
+}
+
+static int bd9576_uvd_renable(struct regulator_irq_data *rid)
+{
+ return bd9576_renable(rid, BD957X_REG_INT_UVD_STAT, UVD_IRQ_VALID_MASK);
+}
+
+static int bd9576_ovd_renable(struct regulator_irq_data *rid)
+{
+ return bd9576_renable(rid, BD957X_REG_INT_OVD_STAT, OVD_IRQ_VALID_MASK);
+}
+
+static int bd9576_temp_renable(struct regulator_irq_data *rid)
+{
+ return bd9576_renable(rid, BD957X_REG_INT_THERM_STAT,
+ BD9576_THERM_IRQ_MASK_TW);
+}
+
+static int bd9576_uvd_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int val, ret, i;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, BD957X_REG_INT_UVD_STAT, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ *dev_mask = 0;
+
+ rid->opaque = val & UVD_IRQ_VALID_MASK;
+
+ /*
+ * Go through the set status bits and report either error or warning
+ * to the notifier depending on what was flagged in DT
+ */
+ *dev_mask = val & BD9576_xVD_IRQ_MASK_VOUT1TO4;
+	/* There is a 1-bit gap in the register after the Vout1 .. Vout4 statuses */
+ *dev_mask |= ((val & BD9576_xVD_IRQ_MASK_VOUTL1) >> 1);
+ /*
+	 * We (ab)use the UVD IRQ for OCW notification. DT parsing should
+	 * have added the correct OCW flags to uvd_notif and uvd_err for VoutS1
+ */
+ *dev_mask |= ((val & BD9576_UVD_IRQ_MASK_VOUTS1_OCW) >> 1);
+
+ for_each_set_bit(i, dev_mask, 6) {
+ struct bd957x_regulator_data *rdata;
+ struct regulator_err_state *stat;
+
+ rdata = &d->regulator_data[i];
+ stat = &rid->states[i];
+
+ stat->notifs = rdata->uvd_notif;
+ stat->errors = rdata->uvd_err;
+ }
+
+ ret = regmap_write(d->regmap, BD957X_REG_INT_UVD_STAT,
+ UVD_IRQ_VALID_MASK & val);
+
+ return 0;
+}
+
+static int bd9576_ovd_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int val, ret, i;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, BD957X_REG_INT_OVD_STAT, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ rid->opaque = val & OVD_IRQ_VALID_MASK;
+ *dev_mask = 0;
+
+ if (!(val & OVD_IRQ_VALID_MASK))
+ return 0;
+
+ *dev_mask = val & BD9576_xVD_IRQ_MASK_VOUT1TO4;
+	/* There is a 1-bit gap in the register after the Vout1 .. Vout4 statuses */
+ *dev_mask |= ((val & BD9576_xVD_IRQ_MASK_VOUTL1) >> 1);
+
+ for_each_set_bit(i, dev_mask, 5) {
+ struct bd957x_regulator_data *rdata;
+ struct regulator_err_state *stat;
+
+ rdata = &d->regulator_data[i];
+ stat = &rid->states[i];
+
+ stat->notifs = rdata->ovd_notif;
+ stat->errors = rdata->ovd_err;
+ }
+
+ /* Clear the sub-IRQ status */
+ regmap_write(d->regmap, BD957X_REG_INT_OVD_STAT,
+ OVD_IRQ_VALID_MASK & val);
+
+ return 0;
+}
+
+#define BD9576_DEV_MASK_ALL_REGULATORS 0x3F
+
+static int bd9576_thermal_handler(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask)
+{
+ int val, ret, i;
+ struct bd957x_data *d = (struct bd957x_data *)rid->data;
+
+ ret = regmap_read(d->regmap, BD957X_REG_INT_THERM_STAT, &val);
+ if (ret)
+ return REGULATOR_FAILED_RETRY;
+
+ if (!(val & BD9576_THERM_IRQ_MASK_TW)) {
+ *dev_mask = 0;
+ return 0;
+ }
+
+ *dev_mask = BD9576_DEV_MASK_ALL_REGULATORS;
+
+ for (i = 0; i < BD9576_NUM_REGULATORS; i++) {
+ struct bd957x_regulator_data *rdata;
+ struct regulator_err_state *stat;
+
+ rdata = &d->regulator_data[i];
+ stat = &rid->states[i];
+
+ stat->notifs = rdata->temp_notif;
+ stat->errors = rdata->temp_err;
+ }
+
+ /* Clear the sub-IRQ status */
+ regmap_write(d->regmap, BD957X_REG_INT_THERM_STAT,
+ BD9576_THERM_IRQ_MASK_TW);
+
+ return 0;
+}
+
static int bd957x_probe(struct platform_device *pdev)
{
+ int i;
+ unsigned int num_reg_data;
+ bool vout_mode, ddr_sel, may_have_irqs = false;
struct regmap *regmap;
+ struct bd957x_data *ic_data;
struct regulator_config config = { 0 };
- int i;
- bool vout_mode, ddr_sel;
- const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
- unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
+ /* All regulators are related to UVD and thermal IRQs... */
+ struct regulator_dev *rdevs[BD9576_NUM_REGULATORS];
+ /* ...But VoutS1 is not flagged by OVD IRQ */
+ struct regulator_dev *ovd_devs[BD9576_NUM_OVD_REGULATORS];
+ static const struct regulator_irq_desc bd9576_notif_uvd = {
+ .name = "bd9576-uvd",
+ .irq_off_ms = 1000,
+ .map_event = bd9576_uvd_handler,
+ .renable = bd9576_uvd_renable,
+ .data = &bd957x_regulators,
+ };
+ static const struct regulator_irq_desc bd9576_notif_ovd = {
+ .name = "bd9576-ovd",
+ .irq_off_ms = 1000,
+ .map_event = bd9576_ovd_handler,
+ .renable = bd9576_ovd_renable,
+ .data = &bd957x_regulators,
+ };
+ static const struct regulator_irq_desc bd9576_notif_temp = {
+ .name = "bd9576-temp",
+ .irq_off_ms = 1000,
+ .map_event = bd9576_thermal_handler,
+ .renable = bd9576_temp_renable,
+ .data = &bd957x_regulators,
+ };
enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
+ num_reg_data = ARRAY_SIZE(bd957x_regulators.regulator_data);
+
+ ic_data = &bd957x_regulators;
+
regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!regmap) {
dev_err(&pdev->dev, "No regmap\n");
return -EINVAL;
}
+
+ ic_data->regmap = regmap;
vout_mode = of_property_read_bool(pdev->dev.parent->of_node,
"rohm,vout1-en-low");
if (vout_mode) {
@@ -263,15 +985,17 @@ static int bd957x_probe(struct platform_device *pdev)
* bytes and use bd9576_regulators directly for non-constant configs
* like DDR voltage selection.
*/
+ platform_set_drvdata(pdev, ic_data);
ddr_sel = of_property_read_bool(pdev->dev.parent->of_node,
"rohm,ddr-sel-low");
if (ddr_sel)
- bd9576_regulators[2].desc.fixed_uV = 1350000;
+ ic_data->regulator_data[2].desc.fixed_uV = 1350000;
else
- bd9576_regulators[2].desc.fixed_uV = 1500000;
+ ic_data->regulator_data[2].desc.fixed_uV = 1500000;
switch (chip) {
case ROHM_CHIP_TYPE_BD9576:
+ may_have_irqs = true;
dev_dbg(&pdev->dev, "Found BD9576MUF\n");
break;
case ROHM_CHIP_TYPE_BD9573:
@@ -282,38 +1006,122 @@ static int bd957x_probe(struct platform_device *pdev)
return -EINVAL;
}
+ for (i = 0; i < num_reg_data; i++) {
+ struct regulator_desc *d;
+
+ d = &ic_data->regulator_data[i].desc;
+
+ if (may_have_irqs) {
+ if (d->id >= ARRAY_SIZE(bd9576_ops_arr))
+ return -EINVAL;
+
+ d->ops = bd9576_ops_arr[d->id];
+ } else {
+ if (d->id >= ARRAY_SIZE(bd9573_ops_arr))
+ return -EINVAL;
+
+ d->ops = bd9573_ops_arr[d->id];
+ }
+ }
+
config.dev = pdev->dev.parent;
config.regmap = regmap;
+ config.driver_data = ic_data;
for (i = 0; i < num_reg_data; i++) {
- const struct regulator_desc *desc;
- struct regulator_dev *rdev;
- const struct bd957x_regulator_data *r;
-
- r = &reg_data[i];
- desc = &r->desc;
+ struct bd957x_regulator_data *r = &ic_data->regulator_data[i];
+ const struct regulator_desc *desc = &r->desc;
- rdev = devm_regulator_register(&pdev->dev, desc, &config);
- if (IS_ERR(rdev)) {
+ r->rdev = devm_regulator_register(&pdev->dev, desc,
+ &config);
+ if (IS_ERR(r->rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
desc->name);
- return PTR_ERR(rdev);
+ return PTR_ERR(r->rdev);
}
/*
* Clear the VOUT1 GPIO setting - rest of the regulators do not
* support GPIO control
*/
config.ena_gpiod = NULL;
+
+ if (!may_have_irqs)
+ continue;
+
+ rdevs[i] = r->rdev;
+ if (i < BD957X_VOUTS1)
+ ovd_devs[i] = r->rdev;
}
+ if (may_have_irqs) {
+ void *ret;
+ /*
+		 * We can add both the possible error and warning flags here
+		 * because the core uses these only for status clearing, and
+		 * if we use warnings the errors are always clear - and the
+		 * other way around. We can also add the CURRENT flag for all
+		 * regulators because it is never set if it is not supported.
+		 * The same applies to setting UVD for VoutS1 - it is never
+		 * set, so it can't be accidentally cleared.
+ */
+ int uvd_errs = REGULATOR_ERROR_UNDER_VOLTAGE |
+ REGULATOR_ERROR_UNDER_VOLTAGE_WARN |
+ REGULATOR_ERROR_OVER_CURRENT |
+ REGULATOR_ERROR_OVER_CURRENT_WARN;
+ int ovd_errs = REGULATOR_ERROR_OVER_VOLTAGE_WARN |
+ REGULATOR_ERROR_REGULATION_OUT;
+ int temp_errs = REGULATOR_ERROR_OVER_TEMP |
+ REGULATOR_ERROR_OVER_TEMP_WARN;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "bd9576-uvd");
+ /* Register notifiers - can fail if IRQ is not given */
+ ret = devm_regulator_irq_helper(&pdev->dev, &bd9576_notif_uvd,
+ irq, 0, uvd_errs, NULL,
+ &rdevs[0],
+ BD9576_NUM_REGULATORS);
+ if (IS_ERR(ret)) {
+ if (PTR_ERR(ret) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "UVD disabled %pe\n", ret);
+ }
+
+ irq = platform_get_irq_byname(pdev, "bd9576-ovd");
+
+ ret = devm_regulator_irq_helper(&pdev->dev, &bd9576_notif_ovd,
+ irq, 0, ovd_errs, NULL,
+ &ovd_devs[0],
+ BD9576_NUM_OVD_REGULATORS);
+ if (IS_ERR(ret)) {
+ if (PTR_ERR(ret) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "OVD disabled %pe\n", ret);
+ }
+ irq = platform_get_irq_byname(pdev, "bd9576-temp");
+
+ ret = devm_regulator_irq_helper(&pdev->dev, &bd9576_notif_temp,
+ irq, 0, temp_errs, NULL,
+ &rdevs[0],
+ BD9576_NUM_REGULATORS);
+ if (IS_ERR(ret)) {
+ if (PTR_ERR(ret) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "Thermal warning disabled %pe\n",
+ ret);
+ }
+ }
return 0;
}
static const struct platform_device_id bd957x_pmic_id[] = {
- { "bd9573-pmic", ROHM_CHIP_TYPE_BD9573 },
- { "bd9576-pmic", ROHM_CHIP_TYPE_BD9576 },
+ { "bd9573-regulator", ROHM_CHIP_TYPE_BD9573 },
+ { "bd9576-regulator", ROHM_CHIP_TYPE_BD9576 },
{ },
};
MODULE_DEVICE_TABLE(platform, bd957x_pmic_id);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f192bf19492e..ca6caba8a191 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -33,17 +33,6 @@
#include "dummy.h"
#include "internal.h"
-#define rdev_crit(rdev, fmt, ...) \
- pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_err(rdev, fmt, ...) \
- pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_warn(rdev, fmt, ...) \
- pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_info(rdev, fmt, ...) \
- pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-#define rdev_dbg(rdev, fmt, ...) \
- pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
-
static DEFINE_WW_CLASS(regulator_ww_class);
static DEFINE_MUTEX(regulator_nesting_mutex);
static DEFINE_MUTEX(regulator_list_mutex);
@@ -117,6 +106,7 @@ const char *rdev_get_name(struct regulator_dev *rdev)
else
return "";
}
+EXPORT_SYMBOL_GPL(rdev_get_name);
static bool have_full_constraints(void)
{
@@ -591,8 +581,8 @@ regulator_get_suspend_state_check(struct regulator_dev *rdev, suspend_state_t st
return rstate;
}
-static ssize_t regulator_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
int uV;
@@ -605,16 +595,16 @@ static ssize_t regulator_uV_show(struct device *dev,
return uV;
return sprintf(buf, "%d\n", uV);
}
-static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
+static DEVICE_ATTR_RO(microvolts);
-static ssize_t regulator_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev));
}
-static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
+static DEVICE_ATTR_RO(microamps);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -645,14 +635,14 @@ static ssize_t regulator_print_opmode(char *buf, int mode)
return sprintf(buf, "%s\n", regulator_opmode_to_str(mode));
}
-static ssize_t regulator_opmode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t opmode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf, _regulator_get_mode(rdev));
}
-static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL);
+static DEVICE_ATTR_RO(opmode);
static ssize_t regulator_print_state(char *buf, int state)
{
@@ -664,8 +654,8 @@ static ssize_t regulator_print_state(char *buf, int state)
return sprintf(buf, "unknown\n");
}
-static ssize_t regulator_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
ssize_t ret;
@@ -676,10 +666,10 @@ static ssize_t regulator_state_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);
+static DEVICE_ATTR_RO(state);
-static ssize_t regulator_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
int status;
@@ -723,10 +713,10 @@ static ssize_t regulator_status_show(struct device *dev,
return sprintf(buf, "%s\n", label);
}
-static DEVICE_ATTR(status, 0444, regulator_status_show, NULL);
+static DEVICE_ATTR_RO(status);
-static ssize_t regulator_min_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -735,10 +725,10 @@ static ssize_t regulator_min_uA_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->min_uA);
}
-static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL);
+static DEVICE_ATTR_RO(min_microamps);
-static ssize_t regulator_max_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -747,10 +737,10 @@ static ssize_t regulator_max_uA_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->max_uA);
}
-static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);
+static DEVICE_ATTR_RO(max_microamps);
-static ssize_t regulator_min_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -759,10 +749,10 @@ static ssize_t regulator_min_uV_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->min_uV);
}
-static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);
+static DEVICE_ATTR_RO(min_microvolts);
-static ssize_t regulator_max_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -771,10 +761,10 @@ static ssize_t regulator_max_uV_show(struct device *dev,
return sprintf(buf, "%d\n", rdev->constraints->max_uV);
}
-static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);
+static DEVICE_ATTR_RO(max_microvolts);
-static ssize_t regulator_total_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t requested_microamps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
struct regulator *regulator;
@@ -788,7 +778,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
regulator_unlock(rdev);
return sprintf(buf, "%d\n", uA);
}
-static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
+static DEVICE_ATTR_RO(requested_microamps);
static ssize_t num_users_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -813,104 +803,95 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(type);
-static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_mem_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
}
-static DEVICE_ATTR(suspend_mem_microvolts, 0444,
- regulator_suspend_mem_uV_show, NULL);
+static DEVICE_ATTR_RO(suspend_mem_microvolts);
-static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_disk_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
}
-static DEVICE_ATTR(suspend_disk_microvolts, 0444,
- regulator_suspend_disk_uV_show, NULL);
+static DEVICE_ATTR_RO(suspend_disk_microvolts);
-static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_standby_microvolts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
}
-static DEVICE_ATTR(suspend_standby_microvolts, 0444,
- regulator_suspend_standby_uV_show, NULL);
+static DEVICE_ATTR_RO(suspend_standby_microvolts);
-static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_mem_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf,
rdev->constraints->state_mem.mode);
}
-static DEVICE_ATTR(suspend_mem_mode, 0444,
- regulator_suspend_mem_mode_show, NULL);
+static DEVICE_ATTR_RO(suspend_mem_mode);
-static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_disk_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf,
rdev->constraints->state_disk.mode);
}
-static DEVICE_ATTR(suspend_disk_mode, 0444,
- regulator_suspend_disk_mode_show, NULL);
+static DEVICE_ATTR_RO(suspend_disk_mode);
-static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_standby_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_opmode(buf,
rdev->constraints->state_standby.mode);
}
-static DEVICE_ATTR(suspend_standby_mode, 0444,
- regulator_suspend_standby_mode_show, NULL);
+static DEVICE_ATTR_RO(suspend_standby_mode);
-static ssize_t regulator_suspend_mem_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_mem_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_state(buf,
rdev->constraints->state_mem.enabled);
}
-static DEVICE_ATTR(suspend_mem_state, 0444,
- regulator_suspend_mem_state_show, NULL);
+static DEVICE_ATTR_RO(suspend_mem_state);
-static ssize_t regulator_suspend_disk_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_disk_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_state(buf,
rdev->constraints->state_disk.enabled);
}
-static DEVICE_ATTR(suspend_disk_state, 0444,
- regulator_suspend_disk_state_show, NULL);
+static DEVICE_ATTR_RO(suspend_disk_state);
-static ssize_t regulator_suspend_standby_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t suspend_standby_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return regulator_print_state(buf,
rdev->constraints->state_standby.enabled);
}
-static DEVICE_ATTR(suspend_standby_state, 0444,
- regulator_suspend_standby_state_show, NULL);
+static DEVICE_ATTR_RO(suspend_standby_state);
-static ssize_t regulator_bypass_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t bypass_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
const char *report;
@@ -928,8 +909,7 @@ static ssize_t regulator_bypass_show(struct device *dev,
return sprintf(buf, "%s\n", report);
}
-static DEVICE_ATTR(bypass, 0444,
- regulator_bypass_show, NULL);
+static DEVICE_ATTR_RO(bypass);
/* Calculate the new optimum regulator operating mode based on the new total
* consumer load. All locks held by caller
@@ -1315,6 +1295,52 @@ static int machine_constraints_current(struct regulator_dev *rdev,
static int _regulator_do_enable(struct regulator_dev *rdev);
+static int notif_set_limit(struct regulator_dev *rdev,
+ int (*set)(struct regulator_dev *, int, int, bool),
+ int limit, int severity)
+{
+ bool enable;
+
+ if (limit == REGULATOR_NOTIF_LIMIT_DISABLE) {
+ enable = false;
+ limit = 0;
+ } else {
+ enable = true;
+ }
+
+ if (limit == REGULATOR_NOTIF_LIMIT_ENABLE)
+ limit = 0;
+
+ return set(rdev, limit, severity, enable);
+}
+
+static int handle_notify_limits(struct regulator_dev *rdev,
+ int (*set)(struct regulator_dev *, int, int, bool),
+ struct notification_limit *limits)
+{
+ int ret = 0;
+
+ if (!set)
+ return -EOPNOTSUPP;
+
+ if (limits->prot)
+ ret = notif_set_limit(rdev, set, limits->prot,
+ REGULATOR_SEVERITY_PROT);
+ if (ret)
+ return ret;
+
+ if (limits->err)
+ ret = notif_set_limit(rdev, set, limits->err,
+ REGULATOR_SEVERITY_ERR);
+ if (ret)
+ return ret;
+
+ if (limits->warn)
+ ret = notif_set_limit(rdev, set, limits->warn,
+ REGULATOR_SEVERITY_WARN);
+
+ return ret;
+}
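+
+/*
+ * Illustrative example (the numeric limit here is made up): with
+ * constraints
+ *
+ *	over_curr_limits.prot = 4500000;
+ *	over_curr_limits.warn = REGULATOR_NOTIF_LIMIT_ENABLE;
+ *
+ * handle_notify_limits() invokes the driver callback as
+ *
+ *	set(rdev, 4500000, REGULATOR_SEVERITY_PROT, true);
+ *	set(rdev, 0, REGULATOR_SEVERITY_WARN, true);
+ *
+ * while a limit of REGULATOR_NOTIF_LIMIT_DISABLE maps to
+ *
+ *	set(rdev, 0, severity, false);
+ */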
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -1400,9 +1426,27 @@ static int set_machine_constraints(struct regulator_dev *rdev)
}
}
+ /*
+	 * The existing logic does not warn if over_current_protection is given
+	 * as a constraint but the driver does not support it. I think we should
+	 * warn about this type of issue, as it is possible someone changes the
+	 * PMIC on a board to another type - and the other PMIC's driver does
+	 * not support setting protection. The board composer may happily believe
+	 * the DT limits are respected - especially if the new PMIC HW also
+	 * supports protection but the driver does not. I won't change the logic
+	 * without hearing a more experienced opinion on this though.
+	 *
+	 * If warning is seen as a good idea then we can merge handling the
+	 * over-current protection and detection and get rid of this special
+	 * handling.
+ */
if (rdev->constraints->over_current_protection
&& ops->set_over_current_protection) {
- ret = ops->set_over_current_protection(rdev);
+ int lim = rdev->constraints->over_curr_limits.prot;
+
+ ret = ops->set_over_current_protection(rdev, lim,
+ REGULATOR_SEVERITY_PROT,
+ true);
if (ret < 0) {
rdev_err(rdev, "failed to set over current protection: %pe\n",
ERR_PTR(ret));
@@ -1410,6 +1454,62 @@ static int set_machine_constraints(struct regulator_dev *rdev)
}
}
+ if (rdev->constraints->over_current_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_over_current_protection,
+ &rdev->constraints->over_curr_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set over current limits: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested over-current limits\n");
+ }
+
+ if (rdev->constraints->over_voltage_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_over_voltage_protection,
+ &rdev->constraints->over_voltage_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set over voltage limits %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested over voltage limits\n");
+ }
+
+ if (rdev->constraints->under_voltage_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_under_voltage_protection,
+ &rdev->constraints->under_voltage_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set under voltage limits %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested under voltage limits\n");
+ }
+
+ if (rdev->constraints->over_temp_detection)
+ ret = handle_notify_limits(rdev,
+ ops->set_thermal_protection,
+ &rdev->constraints->temp_limits);
+ if (ret) {
+ if (ret != -EOPNOTSUPP) {
+ rdev_err(rdev, "failed to set temperature limits %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ rdev_warn(rdev,
+ "IC does not support requested temperature limits\n");
+ }
+
if (rdev->constraints->active_discharge && ops->set_active_discharge) {
bool ad_state = (rdev->constraints->active_discharge ==
REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
@@ -1425,6 +1525,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
* and we have control then make sure it is enabled.
*/
if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ /* If we want to enable this regulator, make sure that we know
+ * the supplying regulator.
+ */
+ if (rdev->supply_name && !rdev->supply)
+ return -EPROBE_DEFER;
+
if (rdev->supply) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
@@ -4105,6 +4211,29 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev)
+{
+ int ret;
+
+ regulator_lock(rdev);
+
+ if (!rdev->desc->ops->set_voltage &&
+ !rdev->desc->ops->set_voltage_sel) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* balance only, if regulator is coupled */
+ if (rdev->coupling_desc.n_coupled > 1)
+ ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+ else
+ ret = -EOPNOTSUPP;
+
+out:
+ regulator_unlock(rdev);
+ return ret;
+}
+
/**
* regulator_sync_voltage - re-apply last regulator output voltage
* @regulator: regulator source
@@ -4380,22 +4509,36 @@ unsigned int regulator_get_mode(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_get_mode);
+static int rdev_get_cached_err_flags(struct regulator_dev *rdev)
+{
+ int ret = 0;
+
+ if (rdev->use_cached_err) {
+ spin_lock(&rdev->err_lock);
+ ret = rdev->cached_err;
+ spin_unlock(&rdev->err_lock);
+ }
+ return ret;
+}
+
static int _regulator_get_error_flags(struct regulator_dev *rdev,
unsigned int *flags)
{
- int ret;
+ int cached_flags, ret = 0;
regulator_lock(rdev);
- /* sanity check */
- if (!rdev->desc->ops->get_error_flags) {
+ cached_flags = rdev_get_cached_err_flags(rdev);
+
+ if (rdev->desc->ops->get_error_flags)
+ ret = rdev->desc->ops->get_error_flags(rdev, flags);
+ else if (!rdev->use_cached_err)
ret = -EINVAL;
- goto out;
- }
- ret = rdev->desc->ops->get_error_flags(rdev, flags);
-out:
+ *flags |= cached_flags;
+
regulator_unlock(rdev);
+
return ret;
}
@@ -5228,6 +5371,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto rinse;
}
device_initialize(&rdev->dev);
+ spin_lock_init(&rdev->err_lock);
/*
* Duplicate the config so the driver could override it after
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index eb3fc1db4edc..c4754f3cf233 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
+ ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- return PTR_ERR(drvdata->dev);
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index e18d291c7f21..23fa429ebe76 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -250,7 +250,8 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
case DA9052_ID_BUCK3:
case DA9052_ID_LDO2:
case DA9052_ID_LDO3:
- ret = (new_sel - old_sel) * info->step_uV / 6250;
+ ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV,
+ 6250);
break;
}
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 08cbf688e14d..e66925090258 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
case DA9121_BUCK_MODE_FORCE_PFM:
return REGULATOR_MODE_STANDBY;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
{
struct da9121 *chip = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- unsigned int val;
+ unsigned int val, mode;
int ret = 0;
ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
return -EINVAL;
}
- return da9121_map_mode(val & da9121_mode_field[id].msk);
+ mode = da9121_map_mode(val & da9121_mode_field[id].msk);
+ if (mode == REGULATOR_MODE_INVALID)
+ return -EINVAL;
+
+ return mode;
}
static const struct regulator_ops da9121_buck_ops = {
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 3091210889e3..a8de0aa88bad 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -481,3 +481,55 @@ void devm_regulator_unregister_notifier(struct regulator *regulator,
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_unregister_notifier);
+
+static void regulator_irq_helper_drop(void *res)
+{
+ regulator_irq_helper_cancel(&res);
+}
+
+/**
+ * devm_regulator_irq_helper - resource managed registration of IRQ based
+ * regulator event/error notifier
+ *
+ * @dev:		device to whose lifetime the helper's lifetime is
+ *			bound.
+ * @d: IRQ helper descriptor.
+ * @irq: IRQ used to inform events/errors to be notified.
+ * @irq_flags: Extra IRQ flags to be OR'ed with the default
+ * IRQF_ONESHOT when requesting the (threaded) irq.
+ * @common_errs: Errors which can be flagged by this IRQ for all rdevs.
+ *			When the IRQ is re-enabled these errors will be
+ *			cleared from all associated regulators.
+ * @per_rdev_errs: Optional error flag array describing errors specific
+ * for only some of the regulators. These errors will be
+ * or'ed with common errors. If this is given the array
+ * should contain rdev_amount flags. Can be set to NULL
+ *			if there are no regulator-specific error flags for this
+ * IRQ.
+ * @rdev: Array of pointers to regulators associated with this
+ * IRQ.
+ * @rdev_amount:	Number of regulators associated with this IRQ.
+ *
+ * Return: handle to irq_helper or an ERR_PTR() encoded error code.
+ */
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount)
+{
+ void *ptr;
+ int ret;
+
+ ptr = regulator_irq_helper(dev, d, irq, irq_flags, common_errs,
+ per_rdev_errs, rdev, rdev_amount);
+ if (IS_ERR(ptr))
+ return ptr;
+
+ ret = devm_add_action_or_reset(dev, regulator_irq_helper_drop, ptr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_irq_helper);
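+
+/*
+ * Usage sketch (the "foo" names, the IRQ name and the error flags below are
+ * only examples): a PMIC driver would typically register the helper from its
+ * probe() roughly like this:
+ *
+ *	static const struct regulator_irq_desc foo_notif_uvd = {
+ *		.name = "foo-uvd",
+ *		.irq_off_ms = 1000,
+ *		.map_event = foo_uvd_handler,
+ *	};
+ *
+ *	irq = platform_get_irq_byname(pdev, "foo-uvd");
+ *	helper = devm_regulator_irq_helper(&pdev->dev, &foo_notif_uvd, irq, 0,
+ *					   REGULATOR_ERROR_UNDER_VOLTAGE, NULL,
+ *					   rdevs, num_rdevs);
+ *	if (IS_ERR(helper))
+ *		return PTR_ERR(helper);
+ */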
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index f3918f03aaf3..dac1fb584fa3 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -55,9 +55,7 @@
#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */
#define FAN53526_NVOLTAGES 128
-#define TCS4525_NVOLTAGES 127 /* Numbers of voltages */
-#define TCS_VSEL_NSEL_MASK 0x7f
#define TCS_VSEL0_MODE (1 << 7)
#define TCS_VSEL1_MODE (1 << 6)
@@ -68,7 +66,7 @@ enum fan53555_vendor {
FAN53526_VENDOR_FAIRCHILD = 0,
FAN53555_VENDOR_FAIRCHILD,
FAN53555_VENDOR_SILERGY,
- FAN53555_VENDOR_TCS,
+ FAN53526_VENDOR_TCS,
};
enum {
@@ -90,6 +88,14 @@ enum {
FAN53555_CHIP_ID_08 = 8,
};
+enum {
+ TCS4525_CHIP_ID_12 = 12,
+};
+
+enum {
+ TCS4526_CHIP_ID_00 = 0,
+};
+
/* IC mask revision */
enum {
FAN53555_CHIP_REV_00 = 0x3,
@@ -124,7 +130,8 @@ struct fan53555_device_info {
/* Slew rate */
unsigned int slew_reg;
unsigned int slew_mask;
- unsigned int slew_shift;
+ const unsigned int *ramp_delay_table;
+ unsigned int n_ramp_values;
unsigned int slew_rate;
};
@@ -198,7 +205,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
-static const int slew_rates[] = {
+static const unsigned int slew_rates[] = {
64000,
32000,
16000,
@@ -209,51 +216,13 @@ static const int slew_rates[] = {
500,
};
-static const int tcs_slew_rates[] = {
+static const unsigned int tcs_slew_rates[] = {
18700,
9300,
4600,
2300,
};
-static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp)
-{
- struct fan53555_device_info *di = rdev_get_drvdata(rdev);
- int regval = -1, i;
- const int *slew_rate_t;
- int slew_rate_n;
-
- switch (di->vendor) {
- case FAN53526_VENDOR_FAIRCHILD:
- case FAN53555_VENDOR_FAIRCHILD:
- case FAN53555_VENDOR_SILERGY:
- slew_rate_t = slew_rates;
- slew_rate_n = ARRAY_SIZE(slew_rates);
- break;
- case FAN53555_VENDOR_TCS:
- slew_rate_t = tcs_slew_rates;
- slew_rate_n = ARRAY_SIZE(tcs_slew_rates);
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < slew_rate_n; i++) {
- if (ramp <= slew_rate_t[i])
- regval = i;
- else
- break;
- }
-
- if (regval < 0) {
- dev_err(di->dev, "unsupported ramp value %d\n", ramp);
- return -EINVAL;
- }
-
- return regmap_update_bits(rdev->regmap, di->slew_reg,
- di->slew_mask, regval << di->slew_shift);
-}
-
static const struct regulator_ops fan53555_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -266,7 +235,7 @@ static const struct regulator_ops fan53555_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = fan53555_set_mode,
.get_mode = fan53555_get_mode,
- .set_ramp_delay = fan53555_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_enable = fan53555_set_suspend_enable,
.set_suspend_disable = fan53555_set_suspend_disable,
};
@@ -294,6 +263,10 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
return -EINVAL;
}
+ di->slew_reg = FAN53555_CONTROL;
+ di->slew_mask = CTL_SLEW_MASK;
+ di->ramp_delay_table = slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->vsel_count = FAN53526_NVOLTAGES;
return 0;
@@ -338,7 +311,8 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
}
di->slew_reg = FAN53555_CONTROL;
di->slew_mask = CTL_SLEW_MASK;
- di->slew_shift = CTL_SLEW_SHIFT;
+ di->ramp_delay_table = slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
@@ -359,24 +333,33 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
return -EINVAL;
}
di->slew_reg = FAN53555_CONTROL;
- di->slew_reg = FAN53555_CONTROL;
di->slew_mask = CTL_SLEW_MASK;
- di->slew_shift = CTL_SLEW_SHIFT;
+ di->ramp_delay_table = slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
}
-static int fan53555_voltages_setup_tcs(struct fan53555_device_info *di)
+static int fan53526_voltages_setup_tcs(struct fan53555_device_info *di)
{
- di->slew_reg = TCS4525_TIME;
- di->slew_mask = TCS_SLEW_MASK;
- di->slew_shift = TCS_SLEW_MASK;
-
- /* Init voltage range and step */
- di->vsel_min = 600000;
- di->vsel_step = 6250;
- di->vsel_count = TCS4525_NVOLTAGES;
+ switch (di->chip_id) {
+ case TCS4525_CHIP_ID_12:
+ case TCS4526_CHIP_ID_00:
+ di->slew_reg = TCS4525_TIME;
+ di->slew_mask = TCS_SLEW_MASK;
+ di->ramp_delay_table = tcs_slew_rates;
+ di->n_ramp_values = ARRAY_SIZE(tcs_slew_rates);
+
+ /* Init voltage range and step */
+ di->vsel_min = 600000;
+ di->vsel_step = 6250;
+ di->vsel_count = FAN53526_NVOLTAGES;
+ break;
+ default:
+ dev_err(di->dev, "Chip ID %d not supported!\n", di->chip_id);
+ return -EINVAL;
+ }
return 0;
}
@@ -410,7 +393,7 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
return -EINVAL;
}
break;
- case FAN53555_VENDOR_TCS:
+ case FAN53526_VENDOR_TCS:
switch (pdata->sleep_vsel_id) {
case FAN53555_VSEL_ID_0:
di->sleep_reg = TCS4525_VSEL0;
@@ -449,7 +432,7 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
di->mode_reg = di->vol_reg;
di->mode_mask = VSEL_MODE;
break;
- case FAN53555_VENDOR_TCS:
+ case FAN53526_VENDOR_TCS:
di->mode_reg = TCS4525_COMMAND;
switch (pdata->sleep_vsel_id) {
@@ -477,8 +460,8 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
case FAN53555_VENDOR_SILERGY:
ret = fan53555_voltages_setup_silergy(di);
break;
- case FAN53555_VENDOR_TCS:
- ret = fan53555_voltages_setup_tcs(di);
+ case FAN53526_VENDOR_TCS:
+ ret = fan53526_voltages_setup_tcs(di);
break;
default:
dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
@@ -505,6 +488,10 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->uV_step = di->vsel_step;
rdesc->vsel_reg = di->vol_reg;
rdesc->vsel_mask = di->vsel_count - 1;
+ rdesc->ramp_reg = di->slew_reg;
+ rdesc->ramp_mask = di->slew_mask;
+ rdesc->ramp_delay_table = di->ramp_delay_table;
+ rdesc->n_ramp_values = di->n_ramp_values;
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
@@ -553,7 +540,10 @@ static const struct of_device_id __maybe_unused fan53555_dt_ids[] = {
.data = (void *)FAN53555_VENDOR_SILERGY,
}, {
.compatible = "tcs,tcs4525",
- .data = (void *)FAN53555_VENDOR_TCS
+ .data = (void *)FAN53526_VENDOR_TCS
+ }, {
+ .compatible = "tcs,tcs4526",
+ .data = (void *)FAN53526_VENDOR_TCS
},
{ }
};
@@ -661,7 +651,10 @@ static const struct i2c_device_id fan53555_id[] = {
.driver_data = FAN53555_VENDOR_SILERGY
}, {
.name = "tcs4525",
- .driver_data = FAN53555_VENDOR_TCS
+ .driver_data = FAN53526_VENDOR_TCS
+ }, {
+ .name = "tcs4526",
+ .driver_data = FAN53526_VENDOR_TCS
},
{ },
};
diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
index e83eb4fb1876..8f25930d2769 100644
--- a/drivers/regulator/fan53880.c
+++ b/drivers/regulator/fan53880.c
@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
}, \
.n_linear_ranges = 2, \
+ .n_voltages = 0x74, \
.vsel_reg = FAN53880_LDO ## _num ## VOUT, \
.vsel_mask = 0x7f, \
.enable_reg = FAN53880_ENABLE, \
@@ -76,8 +77,9 @@ static const struct regulator_desc fan53880_regulators[] = {
REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
},
.n_linear_ranges = 2,
+ .n_voltages = 0xf8,
.vsel_reg = FAN53880_BUCKVOUT,
- .vsel_mask = 0x7f,
+ .vsel_mask = 0xff,
.enable_reg = FAN53880_ENABLE,
.enable_mask = 0x10,
.enable_time = 480,
@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
},
.n_linear_ranges = 2,
+ .n_voltages = 0x71,
.vsel_reg = FAN53880_BOOSTVOUT,
.vsel_mask = 0x7f,
.enable_reg = FAN53880_ENABLE_BOOST,
@@ -111,8 +114,7 @@ static const struct regmap_config fan53880_regmap = {
.max_register = FAN53880_ENABLE_BOOST,
};
-static int fan53880_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int fan53880_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -174,7 +176,7 @@ static struct i2c_driver fan53880_regulator_driver = {
.name = "fan53880",
.of_match_table = of_match_ptr(fan53880_dt_ids),
},
- .probe = fan53880_i2c_probe,
+ .probe_new = fan53880_i2c_probe,
.id_table = fan53880_i2c_id,
};
module_i2c_driver(fan53880_regulator_driver);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 02ad83153e19..39284610a536 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
{
struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
struct device *dev = rdev->dev.parent;
+ int ret;
+
+ ret = dev_pm_genpd_set_performance_state(dev, 0);
+ if (ret)
+ return ret;
priv->enable_counter--;
- return dev_pm_genpd_set_performance_state(dev, 0);
+ return 0;
}
static int reg_is_enabled(struct regulator_dev *rdev)
@@ -271,7 +276,8 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
*/
cfg.ena_gpiod = gpiod_get_optional(&pdev->dev, NULL, gflags);
if (IS_ERR(cfg.ena_gpiod))
- return PTR_ERR(cfg.ena_gpiod);
+ return dev_err_probe(&pdev->dev, PTR_ERR(cfg.ena_gpiod),
+ "can't get GPIO\n");
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 0e16e31c968f..ad2237a95572 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -948,7 +948,7 @@ int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
int ret;
unsigned int sel;
- if (!rdev->desc->n_ramp_values)
+ if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
return -EINVAL;
ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index dc631c1a46b4..bff8c515dcde 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -386,7 +386,7 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 reg_val;
+ unsigned int reg_val;
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
@@ -398,7 +398,7 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 reg_val;
+ unsigned int reg_val;
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
@@ -411,7 +411,7 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 new_mode;
+ unsigned int new_mode;
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -435,7 +435,7 @@ static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
- u32 new_mode;
+ unsigned int new_mode;
switch (mode) {
case REGULATOR_MODE_NORMAL:
diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c
index f6a14e9c3cbf..9b162c0555c3 100644
--- a/drivers/regulator/hi6421v600-regulator.c
+++ b/drivers/regulator/hi6421v600-regulator.c
@@ -3,7 +3,7 @@
// Device driver for regulators in Hisi IC
//
// Copyright (c) 2013 Linaro Ltd.
-// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2011 HiSilicon Ltd.
// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
//
// Guodong Xu <guodong.xu@linaro.org>
@@ -16,14 +16,15 @@
#include <linux/regulator/driver.h>
#include <linux/spmi.h>
+struct hi6421_spmi_reg_priv {
+ /* Serialize regulator enable logic */
+ struct mutex enable_mutex;
+};
+
struct hi6421_spmi_reg_info {
struct regulator_desc desc;
- struct hi6421_spmi_pmic *pmic;
u8 eco_mode_mask;
u32 eco_uA;
-
- /* Serialize regulator enable logic */
- struct mutex enable_mutex;
};
static const unsigned int ldo3_voltages[] = {
@@ -83,7 +84,7 @@ static const unsigned int ldo34_voltages[] = {
.owner = THIS_MODULE, \
.volt_table = vtable, \
.n_voltages = ARRAY_SIZE(vtable), \
- .vsel_mask = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+ .vsel_mask = ARRAY_SIZE(vtable) - 1, \
.vsel_reg = vreg, \
.enable_reg = ereg, \
.enable_mask = emask, \
@@ -97,41 +98,31 @@ static const unsigned int ldo34_voltages[] = {
static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ struct hi6421_spmi_reg_priv *priv;
int ret;
+ priv = dev_get_drvdata(rdev->dev.parent);
/* cannot enable more than one regulator at one time */
- mutex_lock(&sreg->enable_mutex);
+ mutex_lock(&priv->enable_mutex);
- ret = regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
rdev->desc->enable_mask);
/* Avoid powering up multiple devices at the same time */
usleep_range(rdev->desc->off_on_delay, rdev->desc->off_on_delay + 60);
- mutex_unlock(&sreg->enable_mutex);
+ mutex_unlock(&priv->enable_mutex);
return ret;
}
-static int hi6421_spmi_regulator_disable(struct regulator_dev *rdev)
-{
- struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
-
- return regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, 0);
-}
-
static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
{
struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 reg_val;
+ unsigned int reg_val;
- regmap_read(pmic->regmap, rdev->desc->enable_reg, &reg_val);
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & sreg->eco_mode_mask)
return REGULATOR_MODE_IDLE;
@@ -143,21 +134,23 @@ static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
- struct hi6421_spmi_pmic *pmic = sreg->pmic;
- u32 val;
+ unsigned int val;
switch (mode) {
case REGULATOR_MODE_NORMAL:
val = 0;
break;
case REGULATOR_MODE_IDLE:
- val = sreg->eco_mode_mask << (ffs(sreg->eco_mode_mask) - 1);
+ if (!sreg->eco_mode_mask)
+ return -EINVAL;
+
+ val = sreg->eco_mode_mask;
break;
default:
return -EINVAL;
}
- return regmap_update_bits(pmic->regmap, rdev->desc->enable_reg,
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
sreg->eco_mode_mask, val);
}
@@ -177,9 +170,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
static const struct regulator_ops hi6421_spmi_ldo_rops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = hi6421_spmi_regulator_enable,
- .disable = hi6421_spmi_regulator_disable,
+ .disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
+ .map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = hi6421_spmi_regulator_get_mode,
@@ -238,7 +231,7 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
{
struct device *pmic_dev = pdev->dev.parent;
struct regulator_config config = { };
- struct hi6421_spmi_reg_info *sreg;
+ struct hi6421_spmi_reg_priv *priv;
struct hi6421_spmi_reg_info *info;
struct device *dev = &pdev->dev;
struct hi6421_spmi_pmic *pmic;
@@ -254,18 +247,18 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
if (WARN_ON(!pmic))
return -ENODEV;
- sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
- if (!sreg)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- sreg->pmic = pmic;
- mutex_init(&sreg->enable_mutex);
+ mutex_init(&priv->enable_mutex);
+ platform_set_drvdata(pdev, priv);
for (i = 0; i < ARRAY_SIZE(regulator_info); i++) {
info = &regulator_info[i];
config.dev = pdev->dev.parent;
- config.driver_data = sreg;
+ config.driver_data = info;
config.regmap = pmic->regmap;
rdev = devm_regulator_register(dev, &info->desc, &config);
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index ac2ee2030211..556bb73f3329 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -2,7 +2,7 @@
//
// Device driver for regulators in Hi655x IC
//
-// Copyright (c) 2016 Hisilicon.
+// Copyright (c) 2016 HiSilicon Ltd.
//
// Authors:
// Chen Feng <puck.chen@hisilicon.com>
@@ -72,7 +72,7 @@ enum hi655x_regulator_id {
static int hi655x_is_enabled(struct regulator_dev *rdev)
{
unsigned int value = 0;
- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
regmap_read(rdev->regmap, regulator->status_reg, &value);
return (value & rdev->desc->enable_mask);
@@ -80,7 +80,7 @@ static int hi655x_is_enabled(struct regulator_dev *rdev)
static int hi655x_disable(struct regulator_dev *rdev)
{
- struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+ const struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
return regmap_write(rdev->regmap, regulator->disable_reg,
rdev->desc->enable_mask);
@@ -169,7 +169,6 @@ static const struct hi655x_regulator regulators[] = {
static int hi655x_regulator_probe(struct platform_device *pdev)
{
unsigned int i;
- struct hi655x_regulator *regulator;
struct hi655x_pmic *pmic;
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -180,22 +179,17 @@ static int hi655x_regulator_probe(struct platform_device *pdev)
return -ENODEV;
}
- regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
- if (!regulator)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, regulator);
-
config.dev = pdev->dev.parent;
config.regmap = pmic->regmap;
- config.driver_data = regulator;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ config.driver_data = (void *) &regulators[i];
+
rdev = devm_regulator_register(&pdev->dev,
&regulators[i].rdesc,
&config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
- regulator->rdesc.name);
+ regulators[i].rdesc.name);
return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 2391b565ef11..1e9c71642143 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -15,6 +15,17 @@
#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+#define rdev_crit(rdev, fmt, ...) \
+ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_err(rdev, fmt, ...) \
+ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_warn(rdev, fmt, ...) \
+ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_info(rdev, fmt, ...) \
+ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_dbg(rdev, fmt, ...) \
+ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+
struct regulator_voltage {
int min_uV;
int max_uV;
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
new file mode 100644
index 000000000000..fabe2e53093e
--- /dev/null
+++ b/drivers/regulator/irq_helpers.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2021 ROHM Semiconductors
+// regulator IRQ based event notification helpers
+//
+// Logic has been partially adapted from qcom-labibb driver.
+//
+// Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/regulator/driver.h>
+
+#include "internal.h"
+
+#define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000
+
+struct regulator_irq {
+ struct regulator_irq_data rdata;
+ struct regulator_irq_desc desc;
+ int irq;
+ int retry_cnt;
+ struct delayed_work isr_work;
+};
+
+/*
+ * Should only be called from threaded handler to prevent potential deadlock
+ */
+static void rdev_flag_err(struct regulator_dev *rdev, int err)
+{
+ spin_lock(&rdev->err_lock);
+ rdev->cached_err |= err;
+ spin_unlock(&rdev->err_lock);
+}
+
+static void rdev_clear_err(struct regulator_dev *rdev, int err)
+{
+ spin_lock(&rdev->err_lock);
+ rdev->cached_err &= ~err;
+ spin_unlock(&rdev->err_lock);
+}
+
+static void regulator_notifier_isr_work(struct work_struct *work)
+{
+ struct regulator_irq *h;
+ struct regulator_irq_desc *d;
+ struct regulator_irq_data *rid;
+ int ret = 0;
+ int tmo, i;
+ int num_rdevs;
+
+ h = container_of(work, struct regulator_irq,
+ isr_work.work);
+ d = &h->desc;
+ rid = &h->rdata;
+ num_rdevs = rid->num_states;
+
+reread:
+ if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
+ if (!d->die)
+ return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ ret = d->die(rid);
+ /*
+ * If the 'last resort' IC recovery failed we will have
+ * nothing else left to do...
+ */
+ if (ret)
+ return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+
+ /*
+ * If h->die() was implemented we assume recovery has been
+ * attempted (probably regulator was shut down) and we
+ * just enable IRQ and bail-out.
+ */
+ goto enable_out;
+ }
+ if (d->renable) {
+ ret = d->renable(rid);
+
+ if (ret == REGULATOR_FAILED_RETRY) {
+ /* Driver could not get current status */
+ h->retry_cnt++;
+ if (!d->reread_ms)
+ goto reread;
+
+ tmo = d->reread_ms;
+ goto reschedule;
+ }
+
+ if (ret) {
+ /*
+			 * IC status reading succeeded. Update the error info
+			 * just in case the renable callback changed it.
+ */
+ for (i = 0; i < num_rdevs; i++) {
+ struct regulator_err_state *stat;
+ struct regulator_dev *rdev;
+
+ stat = &rid->states[i];
+ rdev = stat->rdev;
+ rdev_clear_err(rdev, (~stat->errors) &
+ stat->possible_errs);
+ }
+ h->retry_cnt++;
+ /*
+			 * The problem flagged by the IC is still ON - no point in
+ * re-enabling the IRQ. Retry later.
+ */
+ tmo = d->irq_off_ms;
+ goto reschedule;
+ }
+ }
+
+ /*
+	 * Either the IC reported the problem as cleared or no status checker
+	 * was provided. If the problems are gone - good. If not, the IRQ will
+	 * fire again and we'll go through this loop once more. In any case we
+	 * should clear the error flags here and re-enable the IRQ.
+ */
+ for (i = 0; i < num_rdevs; i++) {
+ struct regulator_err_state *stat;
+ struct regulator_dev *rdev;
+
+ stat = &rid->states[i];
+ rdev = stat->rdev;
+ rdev_clear_err(rdev, stat->possible_errs);
+ }
+
+ /*
+	 * Things seem to have been successful => zero the retry counter.
+ */
+ h->retry_cnt = 0;
+
+enable_out:
+ enable_irq(h->irq);
+
+ return;
+
+reschedule:
+ if (!d->high_prio)
+ mod_delayed_work(system_wq, &h->isr_work,
+ msecs_to_jiffies(tmo));
+ else
+ mod_delayed_work(system_highpri_wq, &h->isr_work,
+ msecs_to_jiffies(tmo));
+}
+
+static irqreturn_t regulator_notifier_isr(int irq, void *data)
+{
+ struct regulator_irq *h = data;
+ struct regulator_irq_desc *d;
+ struct regulator_irq_data *rid;
+ unsigned long rdev_map = 0;
+ int num_rdevs;
+ int ret, i;
+
+ d = &h->desc;
+ rid = &h->rdata;
+ num_rdevs = rid->num_states;
+
+ if (d->fatal_cnt)
+ h->retry_cnt++;
+
+ /*
+ * We spare a few cycles by not clearing the statuses prior to this
+ * call. The IC driver must initialize the status buffers for the
+ * rdevs for which it indicates active events via rdev_map.
+ * (Clearing them here unconditionally would be the safer option.)
+ */
+ ret = d->map_event(irq, rid, &rdev_map);
+
+ /*
+ * If status reading fails (which is unlikely) we don't ack/disable the
+ * IRQ but just increase the fail count and retry when the IRQ fires
+ * again. If retry_cnt exceeds the given safety limit we call the IC
+ * specific die handler which can try disabling the regulator(s).
+ *
+ * If no die handler is given we shut the system down as a last resort.
+ *
+ * We could try disabling all associated rdevs - but we might both make
+ * things worse and still leave the problematic regulator enabled. So
+ * if the IC has no die handler populated we just assume the regulators
+ * can't be disabled.
+ */
+ if (unlikely(ret == REGULATOR_FAILED_RETRY))
+ goto fail_out;
+
+ h->retry_cnt = 0;
+ /*
+ * Let's not disable IRQ if there were no status bits for us. We'd
+ * better leave spurious IRQ handling to genirq
+ */
+ if (ret || !rdev_map)
+ return IRQ_NONE;
+
+ /*
+ * Some events are bogus if the regulator is disabled. Skip such events
+ * if all relevant regulators are disabled
+ */
+ if (d->skip_off) {
+ for_each_set_bit(i, &rdev_map, num_rdevs) {
+ struct regulator_dev *rdev;
+ const struct regulator_ops *ops;
+
+ rdev = rid->states[i].rdev;
+ ops = rdev->desc->ops;
+
+ /*
+ * If any of the flagged regulators is enabled we do
+ * handle this
+ */
+ if (ops->is_enabled(rdev))
+ break;
+ }
+ if (i == num_rdevs)
+ return IRQ_NONE;
+ }
+
+ /* Disable IRQ if HW keeps line asserted */
+ if (d->irq_off_ms)
+ disable_irq_nosync(irq);
+
+ /*
+ * IRQ seems to be for us. Let's fire correct notifiers / store error
+ * flags
+ */
+ for_each_set_bit(i, &rdev_map, num_rdevs) {
+ struct regulator_err_state *stat;
+ struct regulator_dev *rdev;
+
+ stat = &rid->states[i];
+ rdev = stat->rdev;
+
+ rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
+ stat->notifs);
+
+ regulator_notifier_call_chain(rdev, stat->notifs, NULL);
+ rdev_flag_err(rdev, stat->errors);
+ }
+
+ if (d->irq_off_ms) {
+ if (!d->high_prio)
+ schedule_delayed_work(&h->isr_work,
+ msecs_to_jiffies(d->irq_off_ms));
+ else
+ mod_delayed_work(system_highpri_wq,
+ &h->isr_work,
+ msecs_to_jiffies(d->irq_off_ms));
+ }
+
+ return IRQ_HANDLED;
+
+fail_out:
+ if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
+ /* If we have no recovery, just try to shut down straight away */
+ if (!d->die) {
+ hw_protection_shutdown("Regulator failure. Retry count exceeded",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ } else {
+ ret = d->die(rid);
+ /* If die() failed shut down as a last attempt to save the HW */
+ if (ret)
+ hw_protection_shutdown("Regulator failure. Recovery failed",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static int init_rdev_state(struct device *dev, struct regulator_irq *h,
+ struct regulator_dev **rdev, int common_err,
+ int *rdev_err, int rdev_amount)
+{
+ int i;
+
+ h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *
+ rdev_amount, GFP_KERNEL);
+ if (!h->rdata.states)
+ return -ENOMEM;
+
+ h->rdata.num_states = rdev_amount;
+ h->rdata.data = h->desc.data;
+
+ for (i = 0; i < rdev_amount; i++) {
+ h->rdata.states[i].possible_errs = common_err;
+ if (rdev_err)
+ h->rdata.states[i].possible_errs |= *rdev_err++;
+ h->rdata.states[i].rdev = *rdev++;
+ }
+
+ return 0;
+}
+
+static void init_rdev_errors(struct regulator_irq *h)
+{
+ int i;
+
+ for (i = 0; i < h->rdata.num_states; i++)
+ if (h->rdata.states[i].possible_errs)
+ h->rdata.states[i].rdev->use_cached_err = true;
+}
+
+/**
+ * regulator_irq_helper - register IRQ based regulator event/error notifier
+ *
+ * @dev: device providing the IRQs
+ * @d: IRQ helper descriptor.
+ * @irq: The IRQ which is used to signal the events/errors to be notified.
+ * @irq_flags: Extra IRQ flags to be OR'ed with the default
+ * IRQF_ONESHOT when requesting the (threaded) irq.
+ * @common_errs: Errors which can be flagged by this IRQ for all rdevs.
+ * When IRQ is re-enabled these errors will be cleared
+ * from all associated regulators
+ * @per_rdev_errs: Optional array of error flags describing errors specific
+ * to only some of the regulators. These errors will be
+ * OR'ed with the common errors. If this is given, the array
+ * should contain rdev_amount flags. Can be set to NULL
+ * if there are no regulator-specific error flags for this
+ * IRQ.
+ * @rdev: Array of pointers to regulators associated with this
+ * IRQ.
+ * @rdev_amount: Number of regulators associated with this IRQ.
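+ *
+ * A minimal usage sketch; the my_* names, dev, irq and the single rdev
+ * used below are hypothetical:
+ *
+ *	static int my_map_event(int irq, struct regulator_irq_data *rid,
+ *				unsigned long *dev_mask)
+ *	{
+ *		rid->states[0].errors = REGULATOR_ERROR_OVER_CURRENT;
+ *		rid->states[0].notifs = REGULATOR_EVENT_OVER_CURRENT;
+ *		*dev_mask = 1;
+ *
+ *		return 0;
+ *	}
+ *
+ *	static const struct regulator_irq_desc my_ocp_irq = {
+ *		.name = "my-pmic-ocp",
+ *		.irq_off_ms = 1000,
+ *		.map_event = my_map_event,
+ *	};
+ *
+ *	helper = regulator_irq_helper(dev, &my_ocp_irq, irq, 0,
+ *				      REGULATOR_ERROR_OVER_CURRENT, NULL,
+ *				      &my_rdev, 1);
+ *	if (IS_ERR(helper))
+ *		return PTR_ERR(helper);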
+ *
+ * Return: handle to irq_helper or an ERR_PTR() encoded error code.
+ */
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount)
+{
+ struct regulator_irq *h;
+ int ret;
+
+ if (!rdev_amount || !d || !d->map_event || !d->name)
+ return ERR_PTR(-EINVAL);
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return ERR_PTR(-ENOMEM);
+
+ h->irq = irq;
+ h->desc = *d;
+
+ ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
+ rdev_amount);
+ if (ret)
+ return ERR_PTR(ret);
+
+ init_rdev_errors(h);
+
+ if (h->desc.irq_off_ms)
+ INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);
+
+ ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
+ IRQF_ONESHOT | irq_flags, h->desc.name, h);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+
+ return ERR_PTR(ret);
+ }
+
+ return h;
+}
+EXPORT_SYMBOL_GPL(regulator_irq_helper);
+
+/**
+ * regulator_irq_helper_cancel - drop IRQ based regulator event/error notifier
+ *
+ * @handle: Pointer to handle returned by a successful call to
+ * regulator_irq_helper(). Will be NULLed upon return.
+ *
+ * The associated IRQ is released and work is cancelled when the function
+ * returns.
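+ *
+ * A typical caller stores the handle returned by regulator_irq_helper()
+ * in its driver data and tears the notifier down like this (the priv
+ * pointer and field name are only illustrative):
+ *
+ *	regulator_irq_helper_cancel(&priv->irq_helper);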
+ */
+void regulator_irq_helper_cancel(void **handle)
+{
+ if (handle && *handle) {
+ struct regulator_irq *h = *handle;
+
+ free_irq(h->irq, h);
+ if (h->desc.irq_off_ms)
+ cancel_delayed_work_sync(&h->isr_work);
+
+ *handle = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 13c535711265..321bec6e3f8d 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -136,52 +136,9 @@ err_i2c:
return 0;
}
-static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
-{
- int ret;
- unsigned int regval = 0x00;
- enum lp8755_bucks id = rdev_get_id(rdev);
-
- /* uV/us */
- switch (ramp) {
- case 0 ... 230:
- regval = 0x07;
- break;
- case 231 ... 470:
- regval = 0x06;
- break;
- case 471 ... 940:
- regval = 0x05;
- break;
- case 941 ... 1900:
- regval = 0x04;
- break;
- case 1901 ... 3800:
- regval = 0x03;
- break;
- case 3801 ... 7500:
- regval = 0x02;
- break;
- case 7501 ... 15000:
- regval = 0x01;
- break;
- case 15001 ... 30000:
- regval = 0x00;
- break;
- default:
- dev_err(&rdev->dev,
- "Not supported ramp value %d %s\n", ramp, __func__);
- return -EINVAL;
- }
-
- ret = regmap_update_bits(rdev->regmap, 0x07 + id, 0x07, regval);
- if (ret < 0)
- goto err_i2c;
- return ret;
-err_i2c:
- dev_err(&rdev->dev, "i2c access error %s\n", __func__);
- return ret;
-}
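+/*
+ * Ramp rates in uV/us; the table index equals the 3-bit register field
+ * value programmed via .ramp_reg/.ramp_mask by the generic
+ * regulator_set_ramp_delay_regmap() helper.
+ */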
+static const unsigned int lp8755_buck_ramp_table[] = {
+ 30000, 15000, 7500, 3800, 1900, 940, 470, 230
+};
static const struct regulator_ops lp8755_buck_ops = {
.map_voltage = regulator_map_voltage_linear,
@@ -194,7 +151,7 @@ static const struct regulator_ops lp8755_buck_ops = {
.enable_time = lp8755_buck_enable_time,
.set_mode = lp8755_buck_set_mode,
.get_mode = lp8755_buck_get_mode,
- .set_ramp_delay = lp8755_buck_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
#define lp8755_rail(_id) "lp8755_buck"#_id
@@ -269,6 +226,10 @@ out_i2c_error:
.enable_mask = LP8755_BUCK_EN_M,\
.vsel_reg = LP8755_REG_BUCK##_id,\
.vsel_mask = LP8755_BUCK_VOUT_M,\
+ .ramp_reg = (LP8755_BUCK##_id) + 0x7,\
+ .ramp_mask = 0x7,\
+ .ramp_delay_table = lp8755_buck_ramp_table,\
+ .n_ramp_values = ARRAY_SIZE(lp8755_buck_ramp_table),\
}
static const struct regulator_desc lp8755_regulators[] = {
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 38f7ccb63b52..5e0b669c3a01 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -54,6 +54,11 @@
#define LTC3589_VCCR_SW3_GO BIT(4)
#define LTC3589_VCCR_LDO2_GO BIT(6)
+#define LTC3589_VRRCR_SW1_RAMP_MASK GENMASK(1, 0)
+#define LTC3589_VRRCR_SW2_RAMP_MASK GENMASK(3, 2)
+#define LTC3589_VRRCR_SW3_RAMP_MASK GENMASK(5, 4)
+#define LTC3589_VRRCR_LDO2_RAMP_MASK GENMASK(7, 6)
+
enum ltc3589_variant {
LTC3589,
LTC3589_1,
@@ -88,27 +93,9 @@ static const int ltc3589_12_ldo4[] = {
1200000, 1800000, 2500000, 3200000,
};
-static int ltc3589_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- struct ltc3589 *ltc3589 = rdev_get_drvdata(rdev);
- int sel, shift;
-
- if (unlikely(ramp_delay <= 0))
- return -EINVAL;
-
- /* VRRCR slew rate offsets are the same as VCCR go bit offsets */
- shift = ffs(rdev->desc->apply_bit) - 1;
-
- /* The slew rate can be set to 0.88, 1.75, 3.5, or 7 mV/uS */
- for (sel = 0; sel < 4; sel++) {
- if ((880 << sel) >= ramp_delay) {
- return regmap_update_bits(ltc3589->regmap,
- LTC3589_VRRCR,
- 0x3 << shift, sel << shift);
- }
- }
- return -EINVAL;
-}
+static const unsigned int ltc3589_ramp_table[] = {
+ 880, 1750, 3500, 7000
+};
static int ltc3589_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
@@ -149,7 +136,7 @@ static const struct regulator_ops ltc3589_linear_regulator_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_ramp_delay = ltc3589_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_voltage = ltc3589_set_suspend_voltage,
.set_suspend_mode = ltc3589_set_suspend_mode,
@@ -218,16 +205,13 @@ static int ltc3589_of_parse_cb(struct device_node *np,
return 0;
}
-#define LTC3589_REG(_name, _of_name, _ops, en_bit, dtv1_reg, dtv_mask, go_bit)\
+#define LTC3589_REG(_name, _of_name, _ops, en_bit, dtv1_reg, dtv_mask) \
[LTC3589_ ## _name] = { \
.name = #_name, \
.of_match = of_match_ptr(#_of_name), \
.regulators_node = of_match_ptr("regulators"), \
.of_parse_cb = ltc3589_of_parse_cb, \
.n_voltages = (dtv_mask) + 1, \
- .min_uV = (go_bit) ? 362500 : 0, \
- .uV_step = (go_bit) ? 12500 : 0, \
- .ramp_delay = (go_bit) ? 1750 : 0, \
.fixed_uV = (dtv_mask) ? 0 : 800000, \
.ops = &ltc3589_ ## _ops ## _regulator_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -235,30 +219,49 @@ static int ltc3589_of_parse_cb(struct device_node *np,
.owner = THIS_MODULE, \
.vsel_reg = (dtv1_reg), \
.vsel_mask = (dtv_mask), \
- .apply_reg = (go_bit) ? LTC3589_VCCR : 0, \
- .apply_bit = (go_bit), \
.enable_reg = (en_bit) ? LTC3589_OVEN : 0, \
.enable_mask = (en_bit), \
}
#define LTC3589_LINEAR_REG(_name, _of_name, _dtv1) \
- LTC3589_REG(_name, _of_name, linear, LTC3589_OVEN_ ## _name, \
- LTC3589_ ## _dtv1, 0x1f, \
- LTC3589_VCCR_ ## _name ## _GO)
+ [LTC3589_ ## _name] = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(#_of_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = ltc3589_of_parse_cb, \
+ .n_voltages = 32, \
+ .min_uV = 362500, \
+ .uV_step = 12500, \
+ .ramp_delay = 1750, \
+ .ops = &ltc3589_linear_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = LTC3589_ ## _name, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = LTC3589_ ## _dtv1, \
+ .vsel_mask = 0x1f, \
+ .apply_reg = LTC3589_VCCR, \
+ .apply_bit = LTC3589_VCCR_ ## _name ## _GO, \
+ .enable_reg = LTC3589_OVEN, \
+ .enable_mask = (LTC3589_OVEN_ ## _name), \
+ .ramp_reg = LTC3589_VRRCR, \
+ .ramp_mask = LTC3589_VRRCR_ ## _name ## _RAMP_MASK, \
+ .ramp_delay_table = ltc3589_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(ltc3589_ramp_table), \
+ }
+
#define LTC3589_FIXED_REG(_name, _of_name) \
- LTC3589_REG(_name, _of_name, fixed, LTC3589_OVEN_ ## _name, 0, 0, 0)
+ LTC3589_REG(_name, _of_name, fixed, LTC3589_OVEN_ ## _name, 0, 0)
static const struct regulator_desc ltc3589_regulators[] = {
LTC3589_LINEAR_REG(SW1, sw1, B1DTV1),
LTC3589_LINEAR_REG(SW2, sw2, B2DTV1),
LTC3589_LINEAR_REG(SW3, sw3, B3DTV1),
LTC3589_FIXED_REG(BB_OUT, bb-out),
- LTC3589_REG(LDO1, ldo1, fixed_standby, 0, 0, 0, 0),
+ LTC3589_REG(LDO1, ldo1, fixed_standby, 0, 0, 0),
LTC3589_LINEAR_REG(LDO2, ldo2, L2DTV1),
LTC3589_FIXED_REG(LDO3, ldo3),
- LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2,
- 0x60, 0),
+ LTC3589_REG(LDO4, ldo4, table, LTC3589_OVEN_LDO4, LTC3589_L2DTV2, 0x60),
};
static bool ltc3589_writeable_reg(struct device *dev, unsigned int reg)
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 8d9731e4052b..3cf8f085170a 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = pmic;
+ /*
+ * Set of_node_reuse flag to prevent driver core from attempting to
+ * claim any pinmux resources already claimed by the parent device.
+ * Otherwise PMIC driver will fail to re-probe.
+ */
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
for (id = 0; id < MAX77620_NUM_REGS; id++) {
struct regulator_dev *rdev;
struct regulator_desc *rdesc;
@@ -839,12 +846,10 @@ static int max77620_regulator_probe(struct platform_device *pdev)
return ret;
rdev = devm_regulator_register(dev, rdesc, &config);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(dev, "Regulator registration %s failed: %d\n",
- rdesc->name, ret);
- return ret;
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Regulator registration %s failed\n",
+ rdesc->name);
}
return 0;
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index 9089ec608fcc..55a07d3f3ee2 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -67,13 +67,6 @@
#define MAX77686_REGULATORS MAX77686_REG_MAX
#define MAX77686_LDOS 26
-enum max77686_ramp_rate {
- RAMP_RATE_13P75MV,
- RAMP_RATE_27P5MV,
- RAMP_RATE_55MV,
- RAMP_RATE_NO_CTRL, /* 100mV/us */
-};
-
struct max77686_data {
struct device *dev;
DECLARE_BITMAP(gpio_enabled, MAX77686_REGULATORS);
@@ -220,31 +213,6 @@ static int max77686_enable(struct regulator_dev *rdev)
max77686->opmode[id] << shift);
}
-static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_value = RAMP_RATE_NO_CTRL;
-
- switch (ramp_delay) {
- case 1 ... 13750:
- ramp_value = RAMP_RATE_13P75MV;
- break;
- case 13751 ... 27500:
- ramp_value = RAMP_RATE_27P5MV;
- break;
- case 27501 ... 55000:
- ramp_value = RAMP_RATE_55MV;
- break;
- case 55001 ... 100000:
- break;
- default:
- pr_warn("%s: ramp_delay: %d not supported, setting 100000\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- MAX77686_RAMP_RATE_MASK, ramp_value << 6);
-}
-
static int max77686_of_parse_cb(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *config)
@@ -284,6 +252,10 @@ static int max77686_of_parse_cb(struct device_node *np,
return 0;
}
+static const unsigned int max77686_buck_dvs_ramp_table[] = {
+ 13750, 27500, 55000, 100000
+};
+
static const struct regulator_ops max77686_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -330,7 +302,7 @@ static const struct regulator_ops max77686_buck_dvs_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77686_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77686_set_suspend_disable,
};
@@ -462,6 +434,10 @@ static const struct regulator_ops max77686_buck_dvs_ops = {
.enable_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \
.enable_mask = MAX77686_OPMODE_MASK \
<< MAX77686_OPMODE_BUCK234_SHIFT, \
+ .ramp_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \
+ .ramp_mask = MAX77686_RAMP_RATE_MASK, \
+ .ramp_delay_table = max77686_buck_dvs_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(max77686_buck_dvs_ramp_table), \
}
static const struct regulator_desc regulators[] = {
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index 7b8ec8c0bd15..21e0eb0f43f9 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -43,15 +43,14 @@
#define MAX77802_OFF_PWRREQ 0x1
#define MAX77802_LP_PWRREQ 0x2
-/* MAX77802 has two register formats: 2-bit and 4-bit */
-static const unsigned int ramp_table_77802_2bit[] = {
+static const unsigned int max77802_buck234_ramp_table[] = {
12500,
25000,
50000,
100000,
};
-static unsigned int ramp_table_77802_4bit[] = {
+static const unsigned int max77802_buck16_ramp_table[] = {
1000, 2000, 3030, 4000,
5000, 5880, 7140, 8330,
9090, 10000, 11110, 12500,
@@ -221,58 +220,6 @@ static int max77802_enable(struct regulator_dev *rdev)
max77802->opmode[id] << shift);
}
-static int max77802_find_ramp_value(struct regulator_dev *rdev,
- const unsigned int limits[], int size,
- unsigned int ramp_delay)
-{
- int i;
-
- for (i = 0; i < size; i++) {
- if (ramp_delay <= limits[i])
- return i;
- }
-
- /* Use maximum value for no ramp control */
- dev_warn(&rdev->dev, "%s: ramp_delay: %d not supported, setting 100000\n",
- rdev->desc->name, ramp_delay);
- return size - 1;
-}
-
-/* Used for BUCKs 2-4 */
-static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev,
- int ramp_delay)
-{
- int id = rdev_get_id(rdev);
- unsigned int ramp_value;
-
- if (id > MAX77802_BUCK4) {
- dev_warn(&rdev->dev,
- "%s: regulator: ramp delay not supported\n",
- rdev->desc->name);
- return -EINVAL;
- }
- ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit,
- ARRAY_SIZE(ramp_table_77802_2bit), ramp_delay);
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- MAX77802_RAMP_RATE_MASK_2BIT,
- ramp_value << MAX77802_RAMP_RATE_SHIFT_2BIT);
-}
-
-/* For BUCK1, 6 */
-static int max77802_set_ramp_delay_4bit(struct regulator_dev *rdev,
- int ramp_delay)
-{
- unsigned int ramp_value;
-
- ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_4bit,
- ARRAY_SIZE(ramp_table_77802_4bit), ramp_delay);
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- MAX77802_RAMP_RATE_MASK_4BIT,
- ramp_value << MAX77802_RAMP_RATE_SHIFT_4BIT);
-}
-
/*
* LDOs 2, 4-19, 22-35
*/
@@ -316,7 +263,7 @@ static const struct regulator_ops max77802_buck_16_dvs_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77802_set_ramp_delay_4bit,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
};
@@ -330,7 +277,7 @@ static const struct regulator_ops max77802_buck_234_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77802_set_ramp_delay_2bit,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
.set_suspend_mode = max77802_set_suspend_mode,
};
@@ -345,7 +292,6 @@ static const struct regulator_ops max77802_buck_dvs_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max77802_set_ramp_delay_2bit,
.set_suspend_disable = max77802_set_suspend_disable,
};
@@ -409,6 +355,10 @@ static const struct regulator_ops max77802_buck_dvs_ops = {
.vsel_mask = MAX77802_DVS_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \
.enable_mask = MAX77802_OPMODE_MASK, \
+ .ramp_reg = MAX77802_REG_BUCK ## num ## CTRL, \
+ .ramp_mask = MAX77802_RAMP_RATE_MASK_4BIT, \
+ .ramp_delay_table = max77802_buck16_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(max77802_buck16_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
@@ -431,6 +381,10 @@ static const struct regulator_ops max77802_buck_dvs_ops = {
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
.enable_mask = MAX77802_OPMODE_MASK << \
MAX77802_OPMODE_BUCK234_SHIFT, \
+ .ramp_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
+ .ramp_mask = MAX77802_RAMP_RATE_MASK_2BIT, \
+ .ramp_delay_table = max77802_buck234_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(max77802_buck234_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
diff --git a/drivers/regulator/max8893.c b/drivers/regulator/max8893.c
new file mode 100644
index 000000000000..1519bf760da7
--- /dev/null
+++ b/drivers/regulator/max8893.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+static const struct regulator_ops max8893_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
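+/*
+ * All enable bits live in register 0x00; the 5-bit voltage selectors sit
+ * in registers 0x04 (BUCK) through 0x09 (LDO5).
+ */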
+static const struct regulator_desc max8893_regulators[] = {
+ {
+ .name = "BUCK",
+ .supply_name = "in-buck",
+ .of_match = of_match_ptr("buck"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x11,
+ .id = 6,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 800000,
+ .uV_step = 100000,
+ .vsel_reg = 0x4,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(7),
+ },
+ {
+ .name = "LDO1",
+ .supply_name = "in-ldo1",
+ .of_match = of_match_ptr("ldo1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x12,
+ .id = 1,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 1600000,
+ .uV_step = 100000,
+ .vsel_reg = 0x5,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(5),
+ },
+ {
+ .name = "LDO2",
+ .supply_name = "in-ldo2",
+ .of_match = of_match_ptr("ldo2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x16,
+ .id = 2,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 1200000,
+ .uV_step = 100000,
+ .vsel_reg = 0x6,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(4),
+ },
+ {
+ .name = "LDO3",
+ .supply_name = "in-ldo3",
+ .of_match = of_match_ptr("ldo3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x12,
+ .id = 3,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 1600000,
+ .uV_step = 100000,
+ .vsel_reg = 0x7,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(3),
+ },
+ {
+ .name = "LDO4",
+ .supply_name = "in-ldo4",
+ .of_match = of_match_ptr("ldo4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x1a,
+ .id = 4,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 800000,
+ .uV_step = 100000,
+ .vsel_reg = 0x8,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(2),
+ },
+ {
+ .name = "LDO5",
+ .supply_name = "in-ldo5",
+ .of_match = of_match_ptr("ldo5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .n_voltages = 0x1a,
+ .id = 5,
+ .ops = &max8893_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = 800000,
+ .uV_step = 100000,
+ .vsel_reg = 0x9,
+ .vsel_mask = 0x1f,
+ .enable_reg = 0x0,
+ .enable_mask = BIT(1),
+ }
+};
+
+static const struct regmap_config max8893_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max8893_probe_new(struct i2c_client *i2c)
+{
+ int id, ret;
+ struct regulator_config config = {.dev = &i2c->dev};
+ struct regmap *regmap = devm_regmap_init_i2c(i2c, &max8893_regmap);
+
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(max8893_regulators); id++) {
+ struct regulator_dev *rdev;
+
+ rdev = devm_regulator_register(&i2c->dev,
+ &max8893_regulators[id],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "failed to register %s: %d\n",
+ max8893_regulators[id].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max8893_dt_match[] = {
+ { .compatible = "maxim,max8893" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, max8893_dt_match);
+#endif
+
+static const struct i2c_device_id max8893_ids[] = {
+ { "max8893", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, max8893_ids);
+
+static struct i2c_driver max8893_driver = {
+ .probe_new = max8893_probe_new,
+ .driver = {
+ .name = "max8893",
+ .of_match_table = of_match_ptr(max8893_dt_match),
+ },
+ .id_table = max8893_ids,
+};
+
+module_i2c_driver(max8893_driver);
+
+MODULE_DESCRIPTION("Maxim MAX8893 PMIC driver");
+MODULE_AUTHOR("Sergey Larin <cerg2010cerg2010@mail.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 9aee1444181d..8da8f9b6c4fd 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -265,33 +265,6 @@ static unsigned int max8973_dcdc_get_mode(struct regulator_dev *rdev)
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
-static int max8973_set_ramp_delay(struct regulator_dev *rdev,
- int ramp_delay)
-{
- struct max8973_chip *max = rdev_get_drvdata(rdev);
- unsigned int control;
- int ret;
-
- /* Set ramp delay */
- if (ramp_delay <= 12000)
- control = MAX8973_RAMP_12mV_PER_US;
- else if (ramp_delay <= 25000)
- control = MAX8973_RAMP_25mV_PER_US;
- else if (ramp_delay <= 50000)
- control = MAX8973_RAMP_50mV_PER_US;
- else if (ramp_delay <= 200000)
- control = MAX8973_RAMP_200mV_PER_US;
- else
- return -EINVAL;
-
- ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1,
- MAX8973_RAMP_MASK, control);
- if (ret < 0)
- dev_err(max->dev, "register %d update failed, %d",
- MAX8973_CONTROL1, ret);
- return ret;
-}
-
static int max8973_set_current_limit(struct regulator_dev *rdev,
int min_ua, int max_ua)
{
@@ -341,6 +314,10 @@ static int max8973_get_current_limit(struct regulator_dev *rdev)
return 9000000;
}
+static const unsigned int max8973_buck_ramp_table[] = {
+ 12000, 25000, 50000, 200000
+};
+
static const struct regulator_ops max8973_dcdc_ops = {
.get_voltage_sel = max8973_dcdc_get_voltage_sel,
.set_voltage_sel = max8973_dcdc_set_voltage_sel,
@@ -348,7 +325,7 @@ static const struct regulator_ops max8973_dcdc_ops = {
.set_mode = max8973_dcdc_set_mode,
.get_mode = max8973_dcdc_get_mode,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = max8973_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static int max8973_init_dcdc(struct max8973_chip *max,
@@ -694,6 +671,10 @@ static int max8973_probe(struct i2c_client *client,
max->desc.min_uV = MAX8973_MIN_VOLATGE;
max->desc.uV_step = MAX8973_VOLATGE_STEP;
max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE;
+ max->desc.ramp_reg = MAX8973_CONTROL1;
+ max->desc.ramp_mask = MAX8973_RAMP_MASK;
+ max->desc.ramp_delay_table = max8973_buck_ramp_table;
+ max->desc.n_ramp_values = ARRAY_SIZE(max8973_buck_ramp_table);
max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL;
max->enable_external_control = pdata->enable_ext_control;
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 88c6bd5b6c78..042668385678 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -90,10 +90,14 @@ enum mcp16502_reg {
};
/* Ramp delay (uV/us) for buck1, ldo1, ldo2. */
-static const int mcp16502_ramp_b1l12[] = { 6250, 3125, 2083, 1563 };
+static const unsigned int mcp16502_ramp_b1l12[] = {
+ 6250, 3125, 2083, 1563
+};
/* Ramp delay (uV/us) for buck2, buck3, buck4. */
-static const int mcp16502_ramp_b234[] = { 3125, 1563, 1042, 781 };
+static const unsigned int mcp16502_ramp_b234[] = {
+ 3125, 1563, 1042, 781
+};
static unsigned int mcp16502_of_map_mode(unsigned int mode)
{
@@ -103,7 +107,7 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
return REGULATOR_MODE_INVALID;
}
-#define MCP16502_REGULATOR(_name, _id, _ranges, _ops) \
+#define MCP16502_REGULATOR(_name, _id, _ranges, _ops, _ramp_table) \
[_id] = { \
.name = _name, \
.regulators_node = of_match_ptr("regulators"), \
@@ -121,6 +125,10 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
.vsel_mask = MCP16502_VSEL, \
.enable_reg = (((_id) + 1) << 4), \
.enable_mask = MCP16502_EN, \
+ .ramp_reg = MCP16502_REG_BASE(_id, CFG), \
+ .ramp_mask = MCP16502_DVSR, \
+ .ramp_delay_table = _ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(_ramp_table), \
}
enum {
@@ -314,42 +322,6 @@ static int mcp16502_set_voltage_time_sel(struct regulator_dev *rdev,
return ret;
}
-static int mcp16502_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- const int *ramp;
- int id = rdev_get_id(rdev);
- unsigned int i, size;
-
- switch (id) {
- case BUCK1:
- case LDO1:
- case LDO2:
- ramp = mcp16502_ramp_b1l12;
- size = ARRAY_SIZE(mcp16502_ramp_b1l12);
- break;
-
- case BUCK2:
- case BUCK3:
- case BUCK4:
- ramp = mcp16502_ramp_b234;
- size = ARRAY_SIZE(mcp16502_ramp_b234);
- break;
-
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < size; i++) {
- if (ramp[i] == ramp_delay)
- break;
- }
- if (i == size)
- return -EINVAL;
-
- return regmap_update_bits(rdev->regmap, MCP16502_REG_BASE(id, CFG),
- MCP16502_DVSR, (i << 2));
-}
-
#ifdef CONFIG_SUSPEND
/*
* mcp16502_suspend_get_target_reg() - get the reg of the target suspend PMIC
@@ -445,7 +417,7 @@ static const struct regulator_ops mcp16502_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.get_status = mcp16502_get_status,
.set_voltage_time_sel = mcp16502_set_voltage_time_sel,
- .set_ramp_delay = mcp16502_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_mode = mcp16502_set_mode,
.get_mode = mcp16502_get_mode,
@@ -471,7 +443,7 @@ static const struct regulator_ops mcp16502_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.get_status = mcp16502_get_status,
.set_voltage_time_sel = mcp16502_set_voltage_time_sel,
- .set_ramp_delay = mcp16502_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
#ifdef CONFIG_SUSPEND
.set_suspend_voltage = mcp16502_set_suspend_voltage,
@@ -495,13 +467,19 @@ static const struct linear_range b234_ranges[] = {
};
static const struct regulator_desc mcp16502_desc[] = {
- /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops) */
- MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops),
- MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops),
- MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops)
+ /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops, ramp_table) */
+ MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b1l12),
+ MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b234),
+ MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b234),
+ MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops,
+ mcp16502_ramp_b234),
+ MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops,
+ mcp16502_ramp_b1l12),
+ MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops,
+ mcp16502_ramp_b1l12)
};
static const struct regmap_range mcp16502_ranges[] = {
@@ -522,8 +500,7 @@ static const struct regmap_config mcp16502_regmap_config = {
.wr_table = &mcp16502_yes_reg_table,
};
-static int mcp16502_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mcp16502_probe(struct i2c_client *client)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -606,7 +583,7 @@ static const struct i2c_device_id mcp16502_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, mcp16502_i2c_id);
static struct i2c_driver mcp16502_drv = {
- .probe = mcp16502_probe,
+ .probe_new = mcp16502_probe,
.driver = {
.name = "mcp16502-regulator",
.of_match_table = of_match_ptr(mcp16502_ids),
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
index 67ce1b52a1a1..39cebec0edb6 100644
--- a/drivers/regulator/mp5416.c
+++ b/drivers/regulator/mp5416.c
@@ -67,6 +67,10 @@
.vsel_mask = MP5416_MASK_VSET, \
.enable_reg = MP5416_REG_BUCK ## _id, \
.enable_mask = MP5416_REGULATOR_EN, \
+ .ramp_reg = MP5416_REG_CTL2, \
+ .ramp_mask = MP5416_MASK_DVS_SLEWRATE, \
+ .ramp_delay_table = mp5416_buck_ramp_table, \
+ .n_ramp_values = ARRAY_SIZE(mp5416_buck_ramp_table), \
.active_discharge_on = _dval, \
.active_discharge_reg = _dreg, \
.active_discharge_mask = _dval, \
@@ -123,7 +127,16 @@ static const unsigned int mp5416_I_limits2[] = {
2200000, 3200000, 4200000, 5200000
};
-static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00: 32mV/us
+ * 01: 16mV/us
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static const unsigned int mp5416_buck_ramp_table[] = {
+ 32000, 16000, 8000, 4000
+};
static const struct regulator_ops mp5416_ldo_ops = {
.enable = regulator_enable_regmap,
@@ -147,7 +160,7 @@ static const struct regulator_ops mp5416_buck_ops = {
.set_active_discharge = regulator_set_active_discharge_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
.set_current_limit = regulator_set_current_limit_regmap,
- .set_ramp_delay = mp5416_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
@@ -161,33 +174,6 @@ static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = {
MP5416LDO("ldo4", 4, BIT(1)),
};
-/*
- * DVS ramp rate BUCK1 to BUCK4
- * 00: 32mV/us
- * 01: 16mV/us
- * 10: 8mV/us
- * 11: 4mV/us
- */
-static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_val;
-
- if (ramp_delay > 32000 || ramp_delay < 0)
- return -EINVAL;
-
- if (ramp_delay <= 4000)
- ramp_val = 3;
- else if (ramp_delay <= 8000)
- ramp_val = 2;
- else if (ramp_delay <= 16000)
- ramp_val = 1;
- else
- ramp_val = 0;
-
- return regmap_update_bits(rdev->regmap, MP5416_REG_CTL2,
- MP5416_MASK_DVS_SLEWRATE, ramp_val << 6);
-}
-
static int mp5416_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
index a84fd74081de..8ad4722eca4b 100644
--- a/drivers/regulator/mp886x.c
+++ b/drivers/regulator/mp886x.c
@@ -26,7 +26,7 @@
struct mp886x_cfg_info {
const struct regulator_ops *rops;
- const int slew_rates[8];
+ const unsigned int slew_rates[8];
const int switch_freq[4];
const u8 fs_reg;
const u8 fs_shift;
@@ -42,28 +42,6 @@ struct mp886x_device_info {
unsigned int sel;
};
-static int mp886x_set_ramp(struct regulator_dev *rdev, int ramp)
-{
- struct mp886x_device_info *di = rdev_get_drvdata(rdev);
- const struct mp886x_cfg_info *ci = di->ci;
- int reg = -1, i;
-
- for (i = 0; i < ARRAY_SIZE(ci->slew_rates); i++) {
- if (ramp <= ci->slew_rates[i])
- reg = i;
- else
- break;
- }
-
- if (reg < 0) {
- dev_err(di->dev, "unsupported ramp value %d\n", ramp);
- return -EINVAL;
- }
-
- return regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
- MP886X_SLEW_MASK, reg << MP886X_SLEW_SHIFT);
-}
-
static void mp886x_set_switch_freq(struct mp886x_device_info *di,
struct regmap *regmap,
u32 freq)
@@ -169,7 +147,7 @@ static const struct regulator_ops mp8869_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = mp886x_set_mode,
.get_mode = mp886x_get_mode,
- .set_ramp_delay = mp886x_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct mp886x_cfg_info mp8869_ci = {
@@ -248,7 +226,7 @@ static const struct regulator_ops mp8867_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = mp886x_set_mode,
.get_mode = mp886x_get_mode,
- .set_ramp_delay = mp886x_set_ramp,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct mp886x_cfg_info mp8867_ci = {
@@ -290,6 +268,10 @@ static int mp886x_regulator_register(struct mp886x_device_info *di,
rdesc->uV_step = 10000;
rdesc->vsel_reg = MP886X_VSEL;
rdesc->vsel_mask = 0x3f;
+ rdesc->ramp_reg = MP886X_SYSCNTLREG1;
+ rdesc->ramp_mask = MP886X_SLEW_MASK;
+ rdesc->ramp_delay_table = di->ci->slew_rates;
+ rdesc->n_ramp_values = ARRAY_SIZE(di->ci->slew_rates);
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
index 9edc34981ee0..284c229e1aa4 100644
--- a/drivers/regulator/mt6315-regulator.c
+++ b/drivers/regulator/mt6315-regulator.c
@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
};
-static unsigned int mt6315_map_mode(u32 mode)
+static unsigned int mt6315_map_mode(unsigned int mode)
{
switch (mode) {
case MT6315_BUCK_MODE_AUTO:
@@ -84,7 +84,7 @@ static unsigned int mt6315_regulator_get_mode(struct regulator_dev *rdev)
modeset_mask = init->modeset_mask[rdev_get_id(rdev)];
ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_4PHASE_ANA_CON42, &regval);
if (ret != 0) {
- dev_notice(&rdev->dev, "Failed to get mode: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to get mode: %d\n", ret);
return ret;
}
@@ -93,7 +93,7 @@ static unsigned int mt6315_regulator_get_mode(struct regulator_dev *rdev)
ret = regmap_read(rdev->regmap, MT6315_BUCK_TOP_CON1, &regval);
if (ret != 0) {
- dev_notice(&rdev->dev, "Failed to get lp mode: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to get lp mode: %d\n", ret);
return ret;
}
@@ -147,12 +147,12 @@ static int mt6315_regulator_set_mode(struct regulator_dev *rdev,
break;
default:
ret = -EINVAL;
- dev_notice(&rdev->dev, "Unsupported mode: %d\n", mode);
+ dev_err(&rdev->dev, "Unsupported mode: %d\n", mode);
break;
}
if (ret != 0) {
- dev_notice(&rdev->dev, "Failed to set mode: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to set mode: %d\n", ret);
return ret;
}
@@ -168,7 +168,7 @@ static int mt6315_get_status(struct regulator_dev *rdev)
info = container_of(rdev->desc, struct mt6315_regulator_info, desc);
ret = regmap_read(rdev->regmap, info->status_reg, &regval);
if (ret < 0) {
- dev_notice(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
return ret;
}
@@ -223,8 +223,8 @@ static int mt6315_regulator_probe(struct spmi_device *pdev)
int i;
regmap = devm_regmap_init_spmi_ext(pdev, &mt6315_regmap_config);
- if (!regmap)
- return -ENODEV;
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
chip = devm_kzalloc(dev, sizeof(struct mt6315_chip), GFP_KERNEL);
if (!chip)
@@ -260,8 +260,9 @@ static int mt6315_regulator_probe(struct spmi_device *pdev)
config.driver_data = init_data;
rdev = devm_regulator_register(dev, &mt6315_regulators[i].desc, &config);
if (IS_ERR(rdev)) {
- dev_notice(dev, "Failed to register %s\n", mt6315_regulators[i].desc.name);
- continue;
+ dev_err(dev, "Failed to register %s\n",
+ mt6315_regulators[i].desc.name);
+ return PTR_ERR(rdev);
}
}
@@ -279,7 +280,7 @@ static void mt6315_regulator_shutdown(struct spmi_device *pdev)
ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY, 0);
ret |= regmap_write(chip->regmap, MT6315_TOP_TMA_KEY_H, 0);
if (ret < 0)
- dev_notice(&pdev->dev, "[%#x] Failed to enable power off sequence. %d\n",
+ dev_err(&pdev->dev, "[%#x] Failed to enable power off sequence. %d\n",
pdev->usid, ret);
}
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
index 13cb6ac9a892..0d35be4e0e5a 100644
--- a/drivers/regulator/mt6358-regulator.c
+++ b/drivers/regulator/mt6358-regulator.c
@@ -153,50 +153,50 @@ static const struct linear_range buck_volt_range4[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
};
-static const u32 vdram2_voltages[] = {
+static const unsigned int vdram2_voltages[] = {
600000, 1800000,
};
-static const u32 vsim_voltages[] = {
+static const unsigned int vsim_voltages[] = {
1700000, 1800000, 2700000, 3000000, 3100000,
};
-static const u32 vibr_voltages[] = {
+static const unsigned int vibr_voltages[] = {
1200000, 1300000, 1500000, 1800000,
2000000, 2800000, 3000000, 3300000,
};
-static const u32 vusb_voltages[] = {
+static const unsigned int vusb_voltages[] = {
3000000, 3100000,
};
-static const u32 vcamd_voltages[] = {
+static const unsigned int vcamd_voltages[] = {
900000, 1000000, 1100000, 1200000,
1300000, 1500000, 1800000,
};
-static const u32 vefuse_voltages[] = {
+static const unsigned int vefuse_voltages[] = {
1700000, 1800000, 1900000,
};
-static const u32 vmch_vemc_voltages[] = {
+static const unsigned int vmch_vemc_voltages[] = {
2900000, 3000000, 3300000,
};
-static const u32 vcama_voltages[] = {
+static const unsigned int vcama_voltages[] = {
1800000, 2500000, 2700000,
2800000, 2900000, 3000000,
};
-static const u32 vcn33_bt_wifi_voltages[] = {
+static const unsigned int vcn33_bt_wifi_voltages[] = {
3300000, 3400000, 3500000,
};
-static const u32 vmc_voltages[] = {
+static const unsigned int vmc_voltages[] = {
1800000, 2900000, 3000000, 3300000,
};
-static const u32 vldo28_voltages[] = {
+static const unsigned int vldo28_voltages[] = {
2800000, 3000000,
};
@@ -457,7 +457,7 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
MT6358_REG_FIXED("ldo_vaud28", VAUD28,
MT6358_LDO_VAUD28_CON0, 0, 2800000),
MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
- MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
+ MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c
new file mode 100644
index 000000000000..7ce0bd377a08
--- /dev/null
+++ b/drivers/regulator/mt6359-regulator.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2021 MediaTek Inc.
+
+#include <linux/platform_device.h>
+#include <linux/mfd/mt6359/registers.h>
+#include <linux/mfd/mt6359p/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6359-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6359_BUCK_MODE_AUTO 0
+#define MT6359_BUCK_MODE_FORCE_PWM 1
+#define MT6359_BUCK_MODE_NORMAL 0
+#define MT6359_BUCK_MODE_LP 2
+
+/*
+ * MT6359 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @status_reg: register for querying the status of the regulator.
+ * @qi: Mask for querying the enable-signal status of the regulator.
+ * @modeset_reg: register for switching between AUTO and PWM mode.
+ * @modeset_mask: MASK for operating the modeset register.
+ * @modeset_shift: SHIFT for operating the modeset register.
+ * @lp_mode_reg: register for switching between normal and low-power mode.
+ * @lp_mode_mask: MASK for operating the lp_mode register.
+ * @lp_mode_shift: SHIFT for operating the lp_mode register.
+ */
+struct mt6359_regulator_info {
+ struct regulator_desc desc;
+ u32 status_reg;
+ u32 qi;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 modeset_shift;
+ u32 lp_mode_reg;
+ u32 lp_mode_mask;
+ u32 lp_mode_shift;
+};
+
+#define MT6359_BUCK(match, _name, min, max, step, \
+ _enable_reg, _status_reg, \
+ _vsel_reg, _vsel_mask, \
+ _lp_mode_reg, _lp_mode_shift, \
+ _modeset_reg, _modeset_shift) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .uV_step = (step), \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .min_uV = (min), \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(0), \
+ .of_map_mode = mt6359_map_mode, \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+ .lp_mode_reg = _lp_mode_reg, \
+ .lp_mode_mask = BIT(_lp_mode_shift), \
+ .lp_mode_shift = _lp_mode_shift, \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = BIT(_modeset_shift), \
+ .modeset_shift = _modeset_shift \
+}
+
+#define MT6359_LDO_LINEAR(match, _name, min, max, step, \
+ _enable_reg, _status_reg, _vsel_reg, _vsel_mask) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .uV_step = (step), \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .min_uV = (min), \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+#define MT6359_LDO(match, _name, _volt_table, \
+ _enable_reg, _enable_mask, _status_reg, \
+ _vsel_reg, _vsel_mask, _en_delay) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(_volt_table), \
+ .volt_table = _volt_table, \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(_enable_mask), \
+ .enable_time = _en_delay, \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+#define MT6359_REG_FIXED(match, _name, _enable_reg, \
+ _status_reg, _fixed_volt) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &mt6359_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(0), \
+ .fixed_uV = (_fixed_volt), \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+#define MT6359P_LDO1(match, _name, _ops, _volt_table, \
+ _enable_reg, _enable_mask, _status_reg, \
+ _vsel_reg, _vsel_mask) \
+[MT6359_ID_##_name] = { \
+ .desc = { \
+ .name = #_name, \
+ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6359_ID_##_name, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(_volt_table), \
+ .volt_table = _volt_table, \
+ .vsel_reg = _vsel_reg, \
+ .vsel_mask = _vsel_mask, \
+ .enable_reg = _enable_reg, \
+ .enable_mask = BIT(_enable_mask), \
+ }, \
+ .status_reg = _status_reg, \
+ .qi = BIT(0), \
+}
+
+static const unsigned int vsim1_voltages[] = {
+ 0, 0, 0, 1700000, 1800000, 0, 0, 0, 2700000, 0, 0, 3000000, 3100000,
+};
+
+static const unsigned int vibr_voltages[] = {
+ 1200000, 1300000, 1500000, 0, 1800000, 2000000, 0, 0, 2700000, 2800000,
+ 0, 3000000, 0, 3300000,
+};
+
+static const unsigned int vrf12_voltages[] = {
+ 0, 0, 1100000, 1200000, 1300000,
+};
+
+static const unsigned int volt18_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1700000, 1800000, 1900000,
+};
+
+static const unsigned int vcn13_voltages[] = {
+ 900000, 1000000, 0, 1200000, 1300000,
+};
+
+static const unsigned int vcn33_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2800000, 0, 0, 0, 3300000, 3400000, 3500000,
+};
+
+static const unsigned int vefuse_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1700000, 1800000, 1900000, 2000000,
+};
+
+static const unsigned int vxo22_voltages[] = {
+ 1800000, 0, 0, 0, 2200000,
+};
+
+static const unsigned int vrfck_voltages[] = {
+ 0, 0, 1500000, 0, 0, 0, 0, 1600000, 0, 0, 0, 0, 1700000,
+};
+
+static const unsigned int vrfck_voltages_1[] = {
+ 1240000, 1600000,
+};
+
+static const unsigned int vio28_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2800000, 2900000, 3000000, 3100000, 3300000,
+};
+
+static const unsigned int vemc_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2900000, 3000000, 0, 3300000,
+};
+
+static const unsigned int vemc_voltages_1[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 2500000, 2800000, 2900000, 3000000, 3100000,
+ 3300000,
+};
+
+static const unsigned int va12_voltages[] = {
+ 0, 0, 0, 0, 0, 0, 1200000, 1300000,
+};
+
+static const unsigned int va09_voltages[] = {
+ 0, 0, 800000, 900000, 0, 0, 1200000,
+};
+
+static const unsigned int vrf18_voltages[] = {
+ 0, 0, 0, 0, 0, 1700000, 1800000, 1810000,
+};
+
+static const unsigned int vbbck_voltages[] = {
+ 0, 0, 0, 0, 1100000, 0, 0, 0, 1150000, 0, 0, 0, 1200000,
+};
+
+static const unsigned int vsim2_voltages[] = {
+ 0, 0, 0, 1700000, 1800000, 0, 0, 0, 2700000, 0, 0, 3000000, 3100000,
+};
+
+static inline unsigned int mt6359_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case MT6359_BUCK_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case MT6359_BUCK_MODE_FORCE_PWM:
+ return REGULATOR_MODE_FAST;
+ case MT6359_BUCK_MODE_LP:
+ return REGULATOR_MODE_IDLE;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int mt6359_get_status(struct regulator_dev *rdev)
+{
+ int ret;
+ u32 regval;
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(rdev->regmap, info->status_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ if (regval & info->qi)
+ return REGULATOR_STATUS_ON;
+ else
+ return REGULATOR_STATUS_OFF;
+}
+
+static unsigned int mt6359_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret, regval;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6359 buck mode: %d\n", ret);
+ return ret;
+ }
+
+ if ((regval & info->modeset_mask) >> info->modeset_shift ==
+ MT6359_BUCK_MODE_FORCE_PWM)
+ return REGULATOR_MODE_FAST;
+
+ ret = regmap_read(rdev->regmap, info->lp_mode_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6359 buck lp mode: %d\n", ret);
+ return ret;
+ }
+
+ if (regval & info->lp_mode_mask)
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret = 0, val;
+ int curr_mode;
+
+ curr_mode = mt6359_regulator_get_mode(rdev);
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = MT6359_BUCK_MODE_FORCE_PWM;
+ val <<= info->modeset_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->modeset_reg,
+ info->modeset_mask,
+ val);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ if (curr_mode == REGULATOR_MODE_FAST) {
+ val = MT6359_BUCK_MODE_AUTO;
+ val <<= info->modeset_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->modeset_reg,
+ info->modeset_mask,
+ val);
+ } else if (curr_mode == REGULATOR_MODE_IDLE) {
+ val = MT6359_BUCK_MODE_NORMAL;
+ val <<= info->lp_mode_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->lp_mode_reg,
+ info->lp_mode_mask,
+ val);
+ udelay(100);
+ }
+ break;
+ case REGULATOR_MODE_IDLE:
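+ /*
+ * MT6359_BUCK_MODE_LP is 2 and the low-power setting is a single
+ * bit in the lp_mode register, hence the shift down to 1 before
+ * shifting into position.
+ */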
+ val = MT6359_BUCK_MODE_LP >> 1;
+ val <<= info->lp_mode_shift;
+ ret = regmap_update_bits(rdev->regmap,
+ info->lp_mode_reg,
+ info->lp_mode_mask,
+ val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to set mt6359 buck mode: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int mt6359p_vemc_set_voltage_sel(struct regulator_dev *rdev,
+ u32 sel)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val = 0;
+
+ sel <<= ffs(info->desc.vsel_mask) - 1;
+ ret = regmap_write(rdev->regmap, MT6359P_TMA_KEY_ADDR, TMA_KEY);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(rdev->regmap, MT6359P_VM_MODE_ADDR, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ /* If HW trapping is 0, use VEMC_VOSEL_0 */
+ ret = regmap_update_bits(rdev->regmap,
+ info->desc.vsel_reg,
+ info->desc.vsel_mask, sel);
+ break;
+ case 1:
+ /* If HW trapping is 1, use VEMC_VOSEL_1 */
+ ret = regmap_update_bits(rdev->regmap,
+ info->desc.vsel_reg + 0x2,
+ info->desc.vsel_mask, sel);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rdev->regmap, MT6359P_TMA_KEY_ADDR, 0);
+ return ret;
+}
+
+static int mt6359p_vemc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct mt6359_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+ u32 val = 0;
+
+ ret = regmap_read(rdev->regmap, MT6359P_VM_MODE_ADDR, &val);
+ if (ret)
+ return ret;
+ switch (val) {
+ case 0:
+ /* If HW trapping is 0, use VEMC_VOSEL_0 */
+ ret = regmap_read(rdev->regmap,
+ info->desc.vsel_reg, &val);
+ break;
+ case 1:
+ /* If HW trapping is 1, use VEMC_VOSEL_1 */
+ ret = regmap_read(rdev->regmap,
+ info->desc.vsel_reg + 0x2, &val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ val &= info->desc.vsel_mask;
+ val >>= ffs(info->desc.vsel_mask) - 1;
+
+ return val;
+}
+
+static const struct regulator_ops mt6359_volt_linear_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+ .set_mode = mt6359_regulator_set_mode,
+ .get_mode = mt6359_regulator_get_mode,
+};
+
+static const struct regulator_ops mt6359_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+};
+
+static const struct regulator_ops mt6359_volt_fixed_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+};
+
+static const struct regulator_ops mt6359p_vemc_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = mt6359p_vemc_set_voltage_sel,
+ .get_voltage_sel = mt6359p_vemc_get_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6359_get_status,
+};
+
+/* The array is indexed by id(MT6359_ID_XXX) */
+static struct mt6359_regulator_info mt6359_regulators[] = {
+ MT6359_BUCK("buck_vs1", VS1, 800000, 2200000, 12500,
+ MT6359_RG_BUCK_VS1_EN_ADDR,
+ MT6359_DA_VS1_EN_ADDR, MT6359_RG_BUCK_VS1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS1_LP_ADDR, MT6359_RG_BUCK_VS1_LP_SHIFT,
+ MT6359_RG_VS1_FPWM_ADDR, MT6359_RG_VS1_FPWM_SHIFT),
+ MT6359_BUCK("buck_vgpu11", VGPU11, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VGPU11_EN_ADDR,
+ MT6359_DA_VGPU11_EN_ADDR, MT6359_RG_BUCK_VGPU11_VOSEL_ADDR,
+ MT6359_RG_BUCK_VGPU11_VOSEL_MASK <<
+ MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VGPU11_LP_ADDR,
+ MT6359_RG_BUCK_VGPU11_LP_SHIFT,
+ MT6359_RG_VGPU11_FCCM_ADDR, MT6359_RG_VGPU11_FCCM_SHIFT),
+ MT6359_BUCK("buck_vmodem", VMODEM, 400000, 1100000, 6250,
+ MT6359_RG_BUCK_VMODEM_EN_ADDR,
+ MT6359_DA_VMODEM_EN_ADDR, MT6359_RG_BUCK_VMODEM_VOSEL_ADDR,
+ MT6359_RG_BUCK_VMODEM_VOSEL_MASK <<
+ MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VMODEM_LP_ADDR,
+ MT6359_RG_BUCK_VMODEM_LP_SHIFT,
+ MT6359_RG_VMODEM_FCCM_ADDR, MT6359_RG_VMODEM_FCCM_SHIFT),
+ MT6359_BUCK("buck_vpu", VPU, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPU_EN_ADDR,
+ MT6359_DA_VPU_EN_ADDR, MT6359_RG_BUCK_VPU_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPU_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPU_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPU_LP_ADDR, MT6359_RG_BUCK_VPU_LP_SHIFT,
+ MT6359_RG_VPU_FCCM_ADDR, MT6359_RG_VPU_FCCM_SHIFT),
+ MT6359_BUCK("buck_vcore", VCORE, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VCORE_EN_ADDR,
+ MT6359_DA_VCORE_EN_ADDR, MT6359_RG_BUCK_VCORE_VOSEL_ADDR,
+ MT6359_RG_BUCK_VCORE_VOSEL_MASK <<
+ MT6359_RG_BUCK_VCORE_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VCORE_LP_ADDR, MT6359_RG_BUCK_VCORE_LP_SHIFT,
+ MT6359_RG_VCORE_FCCM_ADDR, MT6359_RG_VCORE_FCCM_SHIFT),
+ MT6359_BUCK("buck_vs2", VS2, 800000, 1600000, 12500,
+ MT6359_RG_BUCK_VS2_EN_ADDR,
+ MT6359_DA_VS2_EN_ADDR, MT6359_RG_BUCK_VS2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS2_LP_ADDR, MT6359_RG_BUCK_VS2_LP_SHIFT,
+ MT6359_RG_VS2_FPWM_ADDR, MT6359_RG_VS2_FPWM_SHIFT),
+ MT6359_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+ MT6359_RG_BUCK_VPA_EN_ADDR,
+ MT6359_DA_VPA_EN_ADDR, MT6359_RG_BUCK_VPA_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPA_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPA_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPA_LP_ADDR, MT6359_RG_BUCK_VPA_LP_SHIFT,
+ MT6359_RG_VPA_MODESET_ADDR, MT6359_RG_VPA_MODESET_SHIFT),
+ MT6359_BUCK("buck_vproc2", VPROC2, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC2_EN_ADDR,
+ MT6359_DA_VPROC2_EN_ADDR, MT6359_RG_BUCK_VPROC2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC2_LP_ADDR,
+ MT6359_RG_BUCK_VPROC2_LP_SHIFT,
+ MT6359_RG_VPROC2_FCCM_ADDR, MT6359_RG_VPROC2_FCCM_SHIFT),
+ MT6359_BUCK("buck_vproc1", VPROC1, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC1_EN_ADDR,
+ MT6359_DA_VPROC1_EN_ADDR, MT6359_RG_BUCK_VPROC1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC1_LP_ADDR,
+ MT6359_RG_BUCK_VPROC1_LP_SHIFT,
+ MT6359_RG_VPROC1_FCCM_ADDR, MT6359_RG_VPROC1_FCCM_SHIFT),
+ MT6359_BUCK("buck_vcore_sshub", VCORE_SSHUB, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR,
+ MT6359_DA_VCORE_EN_ADDR,
+ MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR,
+ MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK <<
+ MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VCORE_LP_ADDR, MT6359_RG_BUCK_VCORE_LP_SHIFT,
+ MT6359_RG_VCORE_FCCM_ADDR, MT6359_RG_VCORE_FCCM_SHIFT),
+ MT6359_REG_FIXED("ldo_vaud18", VAUD18, MT6359_RG_LDO_VAUD18_EN_ADDR,
+ MT6359_DA_VAUD18_B_EN_ADDR, 1800000),
+ MT6359_LDO("ldo_vsim1", VSIM1, vsim1_voltages,
+ MT6359_RG_LDO_VSIM1_EN_ADDR, MT6359_RG_LDO_VSIM1_EN_SHIFT,
+ MT6359_DA_VSIM1_B_EN_ADDR, MT6359_RG_VSIM1_VOSEL_ADDR,
+ MT6359_RG_VSIM1_VOSEL_MASK << MT6359_RG_VSIM1_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO("ldo_vibr", VIBR, vibr_voltages,
+ MT6359_RG_LDO_VIBR_EN_ADDR, MT6359_RG_LDO_VIBR_EN_SHIFT,
+ MT6359_DA_VIBR_B_EN_ADDR, MT6359_RG_VIBR_VOSEL_ADDR,
+ MT6359_RG_VIBR_VOSEL_MASK << MT6359_RG_VIBR_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vrf12", VRF12, vrf12_voltages,
+ MT6359_RG_LDO_VRF12_EN_ADDR, MT6359_RG_LDO_VRF12_EN_SHIFT,
+ MT6359_DA_VRF12_B_EN_ADDR, MT6359_RG_VRF12_VOSEL_ADDR,
+ MT6359_RG_VRF12_VOSEL_MASK << MT6359_RG_VRF12_VOSEL_SHIFT,
+ 120),
+ MT6359_REG_FIXED("ldo_vusb", VUSB, MT6359_RG_LDO_VUSB_EN_0_ADDR,
+ MT6359_DA_VUSB_B_EN_ADDR, 3000000),
+ MT6359_LDO_LINEAR("ldo_vsram_proc2", VSRAM_PROC2, 500000, 1293750, 6250,
+ MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR,
+ MT6359_DA_VSRAM_PROC2_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vio18", VIO18, volt18_voltages,
+ MT6359_RG_LDO_VIO18_EN_ADDR, MT6359_RG_LDO_VIO18_EN_SHIFT,
+ MT6359_DA_VIO18_B_EN_ADDR, MT6359_RG_VIO18_VOSEL_ADDR,
+ MT6359_RG_VIO18_VOSEL_MASK << MT6359_RG_VIO18_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_vcamio", VCAMIO, volt18_voltages,
+ MT6359_RG_LDO_VCAMIO_EN_ADDR, MT6359_RG_LDO_VCAMIO_EN_SHIFT,
+ MT6359_DA_VCAMIO_B_EN_ADDR, MT6359_RG_VCAMIO_VOSEL_ADDR,
+ MT6359_RG_VCAMIO_VOSEL_MASK << MT6359_RG_VCAMIO_VOSEL_SHIFT,
+ 1290),
+ MT6359_REG_FIXED("ldo_vcn18", VCN18, MT6359_RG_LDO_VCN18_EN_ADDR,
+ MT6359_DA_VCN18_B_EN_ADDR, 1800000),
+ MT6359_REG_FIXED("ldo_vfe28", VFE28, MT6359_RG_LDO_VFE28_EN_ADDR,
+ MT6359_DA_VFE28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vcn13", VCN13, vcn13_voltages,
+ MT6359_RG_LDO_VCN13_EN_ADDR, MT6359_RG_LDO_VCN13_EN_SHIFT,
+ MT6359_DA_VCN13_B_EN_ADDR, MT6359_RG_VCN13_VOSEL_ADDR,
+ MT6359_RG_VCN13_VOSEL_MASK << MT6359_RG_VCN13_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vcn33_1_bt", VCN33_1_BT, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_1_EN_0_ADDR,
+ MT6359_RG_LDO_VCN33_1_EN_0_SHIFT,
+ MT6359_DA_VCN33_1_B_EN_ADDR, MT6359_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_1_wifi", VCN33_1_WIFI, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_1_EN_1_ADDR,
+ MT6359_RG_LDO_VCN33_1_EN_1_SHIFT,
+ MT6359_DA_VCN33_1_B_EN_ADDR, MT6359_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_REG_FIXED("ldo_vaux18", VAUX18, MT6359_RG_LDO_VAUX18_EN_ADDR,
+ MT6359_DA_VAUX18_B_EN_ADDR, 1800000),
+ MT6359_LDO_LINEAR("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750,
+ 6250,
+ MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR,
+ MT6359_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vefuse", VEFUSE, vefuse_voltages,
+ MT6359_RG_LDO_VEFUSE_EN_ADDR, MT6359_RG_LDO_VEFUSE_EN_SHIFT,
+ MT6359_DA_VEFUSE_B_EN_ADDR, MT6359_RG_VEFUSE_VOSEL_ADDR,
+ MT6359_RG_VEFUSE_VOSEL_MASK << MT6359_RG_VEFUSE_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vxo22", VXO22, vxo22_voltages,
+ MT6359_RG_LDO_VXO22_EN_ADDR, MT6359_RG_LDO_VXO22_EN_SHIFT,
+ MT6359_DA_VXO22_B_EN_ADDR, MT6359_RG_VXO22_VOSEL_ADDR,
+ MT6359_RG_VXO22_VOSEL_MASK << MT6359_RG_VXO22_VOSEL_SHIFT,
+ 120),
+ MT6359_LDO("ldo_vrfck", VRFCK, vrfck_voltages,
+ MT6359_RG_LDO_VRFCK_EN_ADDR, MT6359_RG_LDO_VRFCK_EN_SHIFT,
+ MT6359_DA_VRFCK_B_EN_ADDR, MT6359_RG_VRFCK_VOSEL_ADDR,
+ MT6359_RG_VRFCK_VOSEL_MASK << MT6359_RG_VRFCK_VOSEL_SHIFT,
+ 480),
+ MT6359_REG_FIXED("ldo_vbif28", VBIF28, MT6359_RG_LDO_VBIF28_EN_ADDR,
+ MT6359_DA_VBIF28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vio28", VIO28, vio28_voltages,
+ MT6359_RG_LDO_VIO28_EN_ADDR, MT6359_RG_LDO_VIO28_EN_SHIFT,
+ MT6359_DA_VIO28_B_EN_ADDR, MT6359_RG_VIO28_VOSEL_ADDR,
+ MT6359_RG_VIO28_VOSEL_MASK << MT6359_RG_VIO28_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vemc", VEMC, vemc_voltages,
+ MT6359_RG_LDO_VEMC_EN_ADDR, MT6359_RG_LDO_VEMC_EN_SHIFT,
+ MT6359_DA_VEMC_B_EN_ADDR, MT6359_RG_VEMC_VOSEL_ADDR,
+ MT6359_RG_VEMC_VOSEL_MASK << MT6359_RG_VEMC_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vcn33_2_bt", VCN33_2_BT, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_2_EN_0_ADDR,
+ MT6359_RG_LDO_VCN33_2_EN_0_SHIFT,
+ MT6359_DA_VCN33_2_B_EN_ADDR, MT6359_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_2_wifi", VCN33_2_WIFI, vcn33_voltages,
+ MT6359_RG_LDO_VCN33_2_EN_1_ADDR,
+ MT6359_RG_LDO_VCN33_2_EN_1_SHIFT,
+ MT6359_DA_VCN33_2_B_EN_ADDR, MT6359_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_va12", VA12, va12_voltages,
+ MT6359_RG_LDO_VA12_EN_ADDR, MT6359_RG_LDO_VA12_EN_SHIFT,
+ MT6359_DA_VA12_B_EN_ADDR, MT6359_RG_VA12_VOSEL_ADDR,
+ MT6359_RG_VA12_VOSEL_MASK << MT6359_RG_VA12_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_va09", VA09, va09_voltages,
+ MT6359_RG_LDO_VA09_EN_ADDR, MT6359_RG_LDO_VA09_EN_SHIFT,
+ MT6359_DA_VA09_B_EN_ADDR, MT6359_RG_VA09_VOSEL_ADDR,
+ MT6359_RG_VA09_VOSEL_MASK << MT6359_RG_VA09_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vrf18", VRF18, vrf18_voltages,
+ MT6359_RG_LDO_VRF18_EN_ADDR, MT6359_RG_LDO_VRF18_EN_SHIFT,
+ MT6359_DA_VRF18_B_EN_ADDR, MT6359_RG_VRF18_VOSEL_ADDR,
+ MT6359_RG_VRF18_VOSEL_MASK << MT6359_RG_VRF18_VOSEL_SHIFT,
+ 120),
+ MT6359_LDO_LINEAR("ldo_vsram_md", VSRAM_MD, 500000, 1100000, 6250,
+ MT6359_RG_LDO_VSRAM_MD_EN_ADDR,
+ MT6359_DA_VSRAM_MD_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vufs", VUFS, volt18_voltages,
+ MT6359_RG_LDO_VUFS_EN_ADDR, MT6359_RG_LDO_VUFS_EN_SHIFT,
+ MT6359_DA_VUFS_B_EN_ADDR, MT6359_RG_VUFS_VOSEL_ADDR,
+ MT6359_RG_VUFS_VOSEL_MASK << MT6359_RG_VUFS_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vm18", VM18, volt18_voltages,
+ MT6359_RG_LDO_VM18_EN_ADDR, MT6359_RG_LDO_VM18_EN_SHIFT,
+ MT6359_DA_VM18_B_EN_ADDR, MT6359_RG_VM18_VOSEL_ADDR,
+ MT6359_RG_VM18_VOSEL_MASK << MT6359_RG_VM18_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vbbck", VBBCK, vbbck_voltages,
+ MT6359_RG_LDO_VBBCK_EN_ADDR, MT6359_RG_LDO_VBBCK_EN_SHIFT,
+ MT6359_DA_VBBCK_B_EN_ADDR, MT6359_RG_VBBCK_VOSEL_ADDR,
+ MT6359_RG_VBBCK_VOSEL_MASK << MT6359_RG_VBBCK_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO_LINEAR("ldo_vsram_proc1", VSRAM_PROC1, 500000, 1293750, 6250,
+ MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR,
+ MT6359_DA_VSRAM_PROC1_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vsim2", VSIM2, vsim2_voltages,
+ MT6359_RG_LDO_VSIM2_EN_ADDR, MT6359_RG_LDO_VSIM2_EN_SHIFT,
+ MT6359_DA_VSIM2_B_EN_ADDR, MT6359_RG_VSIM2_VOSEL_ADDR,
+ MT6359_RG_VSIM2_VOSEL_MASK << MT6359_RG_VSIM2_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO_LINEAR("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB,
+ 500000, 1293750, 6250,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR,
+ MT6359_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT),
+};
+
+static struct mt6359_regulator_info mt6359p_regulators[] = {
+ MT6359_BUCK("buck_vs1", VS1, 800000, 2200000, 12500,
+ MT6359_RG_BUCK_VS1_EN_ADDR,
+ MT6359_DA_VS1_EN_ADDR, MT6359_RG_BUCK_VS1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS1_LP_ADDR, MT6359_RG_BUCK_VS1_LP_SHIFT,
+ MT6359_RG_VS1_FPWM_ADDR, MT6359_RG_VS1_FPWM_SHIFT),
+ MT6359_BUCK("buck_vgpu11", VGPU11, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VGPU11_EN_ADDR,
+ MT6359_DA_VGPU11_EN_ADDR, MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR,
+ MT6359_RG_BUCK_VGPU11_VOSEL_MASK <<
+ MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VGPU11_LP_ADDR,
+ MT6359_RG_BUCK_VGPU11_LP_SHIFT,
+ MT6359_RG_VGPU11_FCCM_ADDR, MT6359_RG_VGPU11_FCCM_SHIFT),
+ MT6359_BUCK("buck_vmodem", VMODEM, 400000, 1100000, 6250,
+ MT6359_RG_BUCK_VMODEM_EN_ADDR,
+ MT6359_DA_VMODEM_EN_ADDR, MT6359_RG_BUCK_VMODEM_VOSEL_ADDR,
+ MT6359_RG_BUCK_VMODEM_VOSEL_MASK <<
+ MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VMODEM_LP_ADDR,
+ MT6359_RG_BUCK_VMODEM_LP_SHIFT,
+ MT6359_RG_VMODEM_FCCM_ADDR, MT6359_RG_VMODEM_FCCM_SHIFT),
+ MT6359_BUCK("buck_vpu", VPU, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPU_EN_ADDR,
+ MT6359_DA_VPU_EN_ADDR, MT6359_RG_BUCK_VPU_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPU_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPU_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPU_LP_ADDR, MT6359_RG_BUCK_VPU_LP_SHIFT,
+ MT6359_RG_VPU_FCCM_ADDR, MT6359_RG_VPU_FCCM_SHIFT),
+ MT6359_BUCK("buck_vcore", VCORE, 506250, 1300000, 6250,
+ MT6359_RG_BUCK_VCORE_EN_ADDR,
+ MT6359_DA_VCORE_EN_ADDR, MT6359P_RG_BUCK_VCORE_VOSEL_ADDR,
+ MT6359_RG_BUCK_VCORE_VOSEL_MASK <<
+ MT6359_RG_BUCK_VCORE_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VCORE_LP_ADDR, MT6359_RG_BUCK_VCORE_LP_SHIFT,
+ MT6359_RG_VCORE_FCCM_ADDR, MT6359_RG_VCORE_FCCM_SHIFT),
+ MT6359_BUCK("buck_vs2", VS2, 800000, 1600000, 12500,
+ MT6359_RG_BUCK_VS2_EN_ADDR,
+ MT6359_DA_VS2_EN_ADDR, MT6359_RG_BUCK_VS2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VS2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VS2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VS2_LP_ADDR, MT6359_RG_BUCK_VS2_LP_SHIFT,
+ MT6359_RG_VS2_FPWM_ADDR, MT6359_RG_VS2_FPWM_SHIFT),
+ MT6359_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+ MT6359_RG_BUCK_VPA_EN_ADDR,
+ MT6359_DA_VPA_EN_ADDR, MT6359_RG_BUCK_VPA_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPA_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPA_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPA_LP_ADDR, MT6359_RG_BUCK_VPA_LP_SHIFT,
+ MT6359_RG_VPA_MODESET_ADDR, MT6359_RG_VPA_MODESET_SHIFT),
+ MT6359_BUCK("buck_vproc2", VPROC2, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC2_EN_ADDR,
+ MT6359_DA_VPROC2_EN_ADDR, MT6359_RG_BUCK_VPROC2_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC2_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC2_LP_ADDR,
+ MT6359_RG_BUCK_VPROC2_LP_SHIFT,
+ MT6359_RG_VPROC2_FCCM_ADDR, MT6359_RG_VPROC2_FCCM_SHIFT),
+ MT6359_BUCK("buck_vproc1", VPROC1, 400000, 1193750, 6250,
+ MT6359_RG_BUCK_VPROC1_EN_ADDR,
+ MT6359_DA_VPROC1_EN_ADDR, MT6359_RG_BUCK_VPROC1_VOSEL_ADDR,
+ MT6359_RG_BUCK_VPROC1_VOSEL_MASK <<
+ MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VPROC1_LP_ADDR,
+ MT6359_RG_BUCK_VPROC1_LP_SHIFT,
+ MT6359_RG_VPROC1_FCCM_ADDR, MT6359_RG_VPROC1_FCCM_SHIFT),
+ MT6359_BUCK("buck_vgpu11_sshub", VGPU11_SSHUB, 400000, 1193750, 6250,
+ MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR,
+ MT6359_DA_VGPU11_EN_ADDR,
+ MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR,
+ MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK <<
+ MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT,
+ MT6359_RG_BUCK_VGPU11_LP_ADDR,
+ MT6359_RG_BUCK_VGPU11_LP_SHIFT,
+ MT6359_RG_VGPU11_FCCM_ADDR, MT6359_RG_VGPU11_FCCM_SHIFT),
+ MT6359_REG_FIXED("ldo_vaud18", VAUD18, MT6359P_RG_LDO_VAUD18_EN_ADDR,
+ MT6359P_DA_VAUD18_B_EN_ADDR, 1800000),
+ MT6359_LDO("ldo_vsim1", VSIM1, vsim1_voltages,
+ MT6359P_RG_LDO_VSIM1_EN_ADDR, MT6359P_RG_LDO_VSIM1_EN_SHIFT,
+ MT6359P_DA_VSIM1_B_EN_ADDR, MT6359P_RG_VSIM1_VOSEL_ADDR,
+ MT6359_RG_VSIM1_VOSEL_MASK << MT6359_RG_VSIM1_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO("ldo_vibr", VIBR, vibr_voltages,
+ MT6359P_RG_LDO_VIBR_EN_ADDR, MT6359P_RG_LDO_VIBR_EN_SHIFT,
+ MT6359P_DA_VIBR_B_EN_ADDR, MT6359P_RG_VIBR_VOSEL_ADDR,
+ MT6359_RG_VIBR_VOSEL_MASK << MT6359_RG_VIBR_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vrf12", VRF12, vrf12_voltages,
+ MT6359P_RG_LDO_VRF12_EN_ADDR, MT6359P_RG_LDO_VRF12_EN_SHIFT,
+ MT6359P_DA_VRF12_B_EN_ADDR, MT6359P_RG_VRF12_VOSEL_ADDR,
+ MT6359_RG_VRF12_VOSEL_MASK << MT6359_RG_VRF12_VOSEL_SHIFT,
+ 480),
+ MT6359_REG_FIXED("ldo_vusb", VUSB, MT6359P_RG_LDO_VUSB_EN_0_ADDR,
+ MT6359P_DA_VUSB_B_EN_ADDR, 3000000),
+ MT6359_LDO_LINEAR("ldo_vsram_proc2", VSRAM_PROC2, 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR,
+ MT6359P_DA_VSRAM_PROC2_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vio18", VIO18, volt18_voltages,
+ MT6359P_RG_LDO_VIO18_EN_ADDR, MT6359P_RG_LDO_VIO18_EN_SHIFT,
+ MT6359P_DA_VIO18_B_EN_ADDR, MT6359P_RG_VIO18_VOSEL_ADDR,
+ MT6359_RG_VIO18_VOSEL_MASK << MT6359_RG_VIO18_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_vcamio", VCAMIO, volt18_voltages,
+ MT6359P_RG_LDO_VCAMIO_EN_ADDR,
+ MT6359P_RG_LDO_VCAMIO_EN_SHIFT,
+ MT6359P_DA_VCAMIO_B_EN_ADDR, MT6359P_RG_VCAMIO_VOSEL_ADDR,
+ MT6359_RG_VCAMIO_VOSEL_MASK << MT6359_RG_VCAMIO_VOSEL_SHIFT,
+ 1290),
+ MT6359_REG_FIXED("ldo_vcn18", VCN18, MT6359P_RG_LDO_VCN18_EN_ADDR,
+ MT6359P_DA_VCN18_B_EN_ADDR, 1800000),
+ MT6359_REG_FIXED("ldo_vfe28", VFE28, MT6359P_RG_LDO_VFE28_EN_ADDR,
+ MT6359P_DA_VFE28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vcn13", VCN13, vcn13_voltages,
+ MT6359P_RG_LDO_VCN13_EN_ADDR, MT6359P_RG_LDO_VCN13_EN_SHIFT,
+ MT6359P_DA_VCN13_B_EN_ADDR, MT6359P_RG_VCN13_VOSEL_ADDR,
+ MT6359_RG_VCN13_VOSEL_MASK << MT6359_RG_VCN13_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vcn33_1_bt", VCN33_1_BT, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_1_EN_0_ADDR,
+ MT6359_RG_LDO_VCN33_1_EN_0_SHIFT,
+ MT6359P_DA_VCN33_1_B_EN_ADDR, MT6359P_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_1_wifi", VCN33_1_WIFI, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_1_EN_1_ADDR,
+ MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT,
+ MT6359P_DA_VCN33_1_B_EN_ADDR, MT6359P_RG_VCN33_1_VOSEL_ADDR,
+ MT6359_RG_VCN33_1_VOSEL_MASK <<
+ MT6359_RG_VCN33_1_VOSEL_SHIFT, 240),
+ MT6359_REG_FIXED("ldo_vaux18", VAUX18, MT6359P_RG_LDO_VAUX18_EN_ADDR,
+ MT6359P_DA_VAUX18_B_EN_ADDR, 1800000),
+ MT6359_LDO_LINEAR("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750,
+ 6250,
+ MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR,
+ MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vefuse", VEFUSE, vefuse_voltages,
+ MT6359P_RG_LDO_VEFUSE_EN_ADDR,
+ MT6359P_RG_LDO_VEFUSE_EN_SHIFT,
+ MT6359P_DA_VEFUSE_B_EN_ADDR, MT6359P_RG_VEFUSE_VOSEL_ADDR,
+ MT6359_RG_VEFUSE_VOSEL_MASK << MT6359_RG_VEFUSE_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO("ldo_vxo22", VXO22, vxo22_voltages,
+ MT6359P_RG_LDO_VXO22_EN_ADDR, MT6359P_RG_LDO_VXO22_EN_SHIFT,
+ MT6359P_DA_VXO22_B_EN_ADDR, MT6359P_RG_VXO22_VOSEL_ADDR,
+ MT6359_RG_VXO22_VOSEL_MASK << MT6359_RG_VXO22_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO("ldo_vrfck_1", VRFCK, vrfck_voltages_1,
+ MT6359P_RG_LDO_VRFCK_EN_ADDR, MT6359P_RG_LDO_VRFCK_EN_SHIFT,
+ MT6359P_DA_VRFCK_B_EN_ADDR, MT6359P_RG_VRFCK_VOSEL_ADDR,
+ MT6359_RG_VRFCK_VOSEL_MASK << MT6359_RG_VRFCK_VOSEL_SHIFT,
+ 480),
+ MT6359_REG_FIXED("ldo_vbif28", VBIF28, MT6359P_RG_LDO_VBIF28_EN_ADDR,
+ MT6359P_DA_VBIF28_B_EN_ADDR, 2800000),
+ MT6359_LDO("ldo_vio28", VIO28, vio28_voltages,
+ MT6359P_RG_LDO_VIO28_EN_ADDR, MT6359P_RG_LDO_VIO28_EN_SHIFT,
+ MT6359P_DA_VIO28_B_EN_ADDR, MT6359P_RG_VIO28_VOSEL_ADDR,
+ MT6359_RG_VIO28_VOSEL_MASK << MT6359_RG_VIO28_VOSEL_SHIFT,
+ 1920),
+ MT6359P_LDO1("ldo_vemc_1", VEMC, mt6359p_vemc_ops, vemc_voltages_1,
+ MT6359P_RG_LDO_VEMC_EN_ADDR, MT6359P_RG_LDO_VEMC_EN_SHIFT,
+ MT6359P_DA_VEMC_B_EN_ADDR,
+ MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR,
+ MT6359P_RG_LDO_VEMC_VOSEL_0_MASK <<
+ MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT),
+ MT6359_LDO("ldo_vcn33_2_bt", VCN33_2_BT, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_2_EN_0_ADDR,
+ MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT,
+ MT6359P_DA_VCN33_2_B_EN_ADDR, MT6359P_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_vcn33_2_wifi", VCN33_2_WIFI, vcn33_voltages,
+ MT6359P_RG_LDO_VCN33_2_EN_1_ADDR,
+ MT6359_RG_LDO_VCN33_2_EN_1_SHIFT,
+ MT6359P_DA_VCN33_2_B_EN_ADDR, MT6359P_RG_VCN33_2_VOSEL_ADDR,
+ MT6359_RG_VCN33_2_VOSEL_MASK <<
+ MT6359_RG_VCN33_2_VOSEL_SHIFT, 240),
+ MT6359_LDO("ldo_va12", VA12, va12_voltages,
+ MT6359P_RG_LDO_VA12_EN_ADDR, MT6359P_RG_LDO_VA12_EN_SHIFT,
+ MT6359P_DA_VA12_B_EN_ADDR, MT6359P_RG_VA12_VOSEL_ADDR,
+ MT6359_RG_VA12_VOSEL_MASK << MT6359_RG_VA12_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_va09", VA09, va09_voltages,
+ MT6359P_RG_LDO_VA09_EN_ADDR, MT6359P_RG_LDO_VA09_EN_SHIFT,
+ MT6359P_DA_VA09_B_EN_ADDR, MT6359P_RG_VA09_VOSEL_ADDR,
+ MT6359_RG_VA09_VOSEL_MASK << MT6359_RG_VA09_VOSEL_SHIFT,
+ 960),
+ MT6359_LDO("ldo_vrf18", VRF18, vrf18_voltages,
+ MT6359P_RG_LDO_VRF18_EN_ADDR, MT6359P_RG_LDO_VRF18_EN_SHIFT,
+ MT6359P_DA_VRF18_B_EN_ADDR, MT6359P_RG_VRF18_VOSEL_ADDR,
+ MT6359_RG_VRF18_VOSEL_MASK << MT6359_RG_VRF18_VOSEL_SHIFT,
+ 240),
+ MT6359_LDO_LINEAR("ldo_vsram_md", VSRAM_MD, 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_MD_EN_ADDR,
+ MT6359P_DA_VSRAM_MD_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vufs", VUFS, volt18_voltages,
+ MT6359P_RG_LDO_VUFS_EN_ADDR, MT6359P_RG_LDO_VUFS_EN_SHIFT,
+ MT6359P_DA_VUFS_B_EN_ADDR, MT6359P_RG_VUFS_VOSEL_ADDR,
+ MT6359_RG_VUFS_VOSEL_MASK << MT6359_RG_VUFS_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vm18", VM18, volt18_voltages,
+ MT6359P_RG_LDO_VM18_EN_ADDR, MT6359P_RG_LDO_VM18_EN_SHIFT,
+ MT6359P_DA_VM18_B_EN_ADDR, MT6359P_RG_VM18_VOSEL_ADDR,
+ MT6359_RG_VM18_VOSEL_MASK << MT6359_RG_VM18_VOSEL_SHIFT,
+ 1920),
+ MT6359_LDO("ldo_vbbck", VBBCK, vbbck_voltages,
+ MT6359P_RG_LDO_VBBCK_EN_ADDR, MT6359P_RG_LDO_VBBCK_EN_SHIFT,
+ MT6359P_DA_VBBCK_B_EN_ADDR, MT6359P_RG_VBBCK_VOSEL_ADDR,
+ MT6359P_RG_VBBCK_VOSEL_MASK << MT6359P_RG_VBBCK_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO_LINEAR("ldo_vsram_proc1", VSRAM_PROC1, 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR,
+ MT6359P_DA_VSRAM_PROC1_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT),
+ MT6359_LDO("ldo_vsim2", VSIM2, vsim2_voltages,
+ MT6359P_RG_LDO_VSIM2_EN_ADDR, MT6359P_RG_LDO_VSIM2_EN_SHIFT,
+ MT6359P_DA_VSIM2_B_EN_ADDR, MT6359P_RG_VSIM2_VOSEL_ADDR,
+ MT6359_RG_VSIM2_VOSEL_MASK << MT6359_RG_VSIM2_VOSEL_SHIFT,
+ 480),
+ MT6359_LDO_LINEAR("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB,
+ 500000, 1293750, 6250,
+ MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR,
+ MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR,
+ MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR,
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK <<
+ MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT),
+};
+
+static int mt6359_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ struct mt6359_regulator_info *mt6359_info;
+ unsigned int hw_ver;
+ int i, ret;
+
+ ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
+ if (ret)
+ return ret;
+ if (hw_ver >= MT6359P_CHIP_VER)
+ mt6359_info = mt6359p_regulators;
+ else
+ mt6359_info = mt6359_regulators;
+
+ config.dev = mt6397->dev;
+ config.regmap = mt6397->regmap;
+ for (i = 0; i < MT6359_MAX_REGULATOR; i++, mt6359_info++) {
+ config.driver_data = mt6359_info;
+ rdev = devm_regulator_register(&pdev->dev, &mt6359_info->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n", mt6359_info->desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id mt6359_platform_ids[] = {
+ {"mt6359-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6359_platform_ids);
+
+static struct platform_driver mt6359_regulator_driver = {
+ .driver = {
+ .name = "mt6359-regulator",
+ },
+ .probe = mt6359_regulator_probe,
+ .id_table = mt6359_platform_ids,
+};
+
+module_platform_driver(mt6359_regulator_driver);
+
+MODULE_AUTHOR("Wen Su <wen.su@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6359 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 49f6c05fee34..f54d4f176882 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -21,6 +21,62 @@ static const char *const regulator_states[PM_SUSPEND_MAX + 1] = {
[PM_SUSPEND_MAX] = "regulator-state-disk",
};
+static void fill_limit(int *limit, int val)
+{
+ if (!val)
+ *limit = REGULATOR_NOTIF_LIMIT_DISABLE;
+ else if (val == 1)
+ *limit = REGULATOR_NOTIF_LIMIT_ENABLE;
+ else
+ *limit = val;
+}
+
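+/*
+ * Parse the optional protection/error/warning limit properties built from
+ * the patterns below, e.g. "regulator-oc-protection-microamp" or
+ * "regulator-temp-warn-kelvin". A value of 0 disables the limit and a
+ * value of 1 enables it at the hardware default level.
+ */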
+static void of_get_regulator_prot_limits(struct device_node *np,
+ struct regulation_constraints *constraints)
+{
+ u32 pval;
+ int i;
+ static const char *const props[] = {
+ "regulator-oc-%s-microamp",
+ "regulator-ov-%s-microvolt",
+ "regulator-temp-%s-kelvin",
+ "regulator-uv-%s-microvolt",
+ };
+ struct notification_limit *limits[] = {
+ &constraints->over_curr_limits,
+ &constraints->over_voltage_limits,
+ &constraints->temp_limits,
+ &constraints->under_voltage_limits,
+ };
+ bool set[4] = {0};
+
+ /* Protection limits: */
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ char prop[255];
+ bool found;
+ int j;
+ static const char *const lvl[] = {
+ "protection", "error", "warn"
+ };
+ int *l[] = {
+ &limits[i]->prot, &limits[i]->err, &limits[i]->warn,
+ };
+
+ for (j = 0; j < ARRAY_SIZE(lvl); j++) {
+ snprintf(prop, sizeof(prop), props[i], lvl[j]);
+ found = !of_property_read_u32(np, prop, &pval);
+ if (found)
+ fill_limit(l[j], pval);
+ set[i] |= found;
+ }
+ }
+ constraints->over_current_detection = set[0];
+ constraints->over_voltage_detection = set[1];
+ constraints->over_temp_detection = set[2];
+ constraints->under_voltage_detection = set[3];
+}
+
static int of_get_regulation_constraints(struct device *dev,
struct device_node *np,
struct regulator_init_data **init_data,
@@ -188,6 +244,8 @@ static int of_get_regulation_constraints(struct device *dev,
constraints->over_current_protection = of_property_read_bool(np,
"regulator-over-current-protection");
+ of_get_regulator_prot_limits(np, constraints);
+
for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
switch (i) {
case PM_SUSPEND_MEM:
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 2f7ee212cb8c..64e5f5f0cc84 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -65,32 +65,9 @@ static const struct regmap_config pca9450_regmap_config = {
* 10: 25mV/4usec
* 11: 25mV/8usec
*/
-static int pca9450_dvs_set_ramp_delay(struct regulator_dev *rdev,
- int ramp_delay)
-{
- int id = rdev_get_id(rdev);
- unsigned int ramp_value;
-
- switch (ramp_delay) {
- case 1 ... 3125:
- ramp_value = BUCK1_RAMP_3P125MV;
- break;
- case 3126 ... 6250:
- ramp_value = BUCK1_RAMP_6P25MV;
- break;
- case 6251 ... 12500:
- ramp_value = BUCK1_RAMP_12P5MV;
- break;
- case 12501 ... 25000:
- ramp_value = BUCK1_RAMP_25MV;
- break;
- default:
- ramp_value = BUCK1_RAMP_25MV;
- }
-
- return regmap_update_bits(rdev->regmap, PCA9450_REG_BUCK1CTRL + id * 3,
- BUCK1_RAMP_MASK, ramp_value << 6);
-}
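+/*
+ * DVS buck ramp rates in uV/us; the array index matches the BUCKxCTRL
+ * ramp field encoding listed above.
+ */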
+static const unsigned int pca9450_dvs_buck_ramp_table[] = {
+ 25000, 12500, 6250, 3125
+};
static const struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.enable = regulator_enable_regmap,
@@ -100,7 +77,7 @@ static const struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = pca9450_dvs_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
};
static const struct regulator_ops pca9450_buck_regulator_ops = {
@@ -251,6 +228,10 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK1CTRL,
+ .ramp_mask = BUCK1_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -276,6 +257,10 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK2CTRL,
+ .ramp_mask = BUCK2_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -301,6 +286,10 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_mask = BUCK3OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK3CTRL,
.enable_mask = BUCK3_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK3CTRL,
+ .ramp_mask = BUCK3_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -477,6 +466,10 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK1OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK1CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK1CTRL,
+ .ramp_mask = BUCK1_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
@@ -502,6 +495,10 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
.enable_mask = BUCK1_ENMODE_MASK,
+ .ramp_reg = PCA9450_REG_BUCK2CTRL,
+ .ramp_mask = BUCK2_RAMP_MASK,
+ .ramp_delay_table = pca9450_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table),
.owner = THIS_MODULE,
.of_parse_cb = pca9450_set_dvs_levels,
},
diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
index de25e3279b4b..b3da0dc58782 100644
--- a/drivers/regulator/qcom-labibb-regulator.c
+++ b/drivers/regulator/qcom-labibb-regulator.c
@@ -307,13 +307,21 @@ end:
return IRQ_HANDLED;
}
-static int qcom_labibb_set_ocp(struct regulator_dev *rdev)
+static int qcom_labibb_set_ocp(struct regulator_dev *rdev, int lim,
+ int severity, bool enable)
{
struct labibb_regulator *vreg = rdev_get_drvdata(rdev);
char *ocp_irq_name;
u32 irq_flags = IRQF_ONESHOT;
int irq_trig_low, ret;
+ /*
+ * The labibb hardware only supports protection: a current limit
+ * cannot be set and protection cannot be disabled.
+ */
+ if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
+ return -EINVAL;
+
/* If there is no OCP interrupt, there's nothing to set */
if (vreg->ocp_irq <= 0)
return -EINVAL;
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 22fec370fa61..6cca910a76de 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -811,12 +811,12 @@ static const struct rpmh_vreg_init_data pm8998_vreg_data[] = {
RPMH_VREG("ldo28", "ldo%s28", &pmic4_pldo, "vdd-l16-l28"),
RPMH_VREG("lvs1", "vs%s1", &pmic4_lvs, "vin-lvs-1-2"),
RPMH_VREG("lvs2", "vs%s2", &pmic4_lvs, "vin-lvs-1-2"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pmi8998_vreg_data[] = {
RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
@@ -824,7 +824,7 @@ static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3"),
RPMH_VREG("smps4", "smp%s4", &pmic4_ftsmps426, "vdd-s4"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
@@ -856,7 +856,7 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"),
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
@@ -880,7 +880,39 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
- {},
+ {}
+};
+
+static const struct rpmh_vreg_init_data pmm8155au_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps510, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps510, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps510, "vdd-s10"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, "vdd-l2-l10"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l6-l9"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l6-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ {}
};
static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
@@ -906,7 +938,7 @@ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l8"),
RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l6-l9-l10"),
RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo, "vdd-l6-l9-l10"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
@@ -934,7 +966,7 @@ static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l1-l12"),
RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l3-l4-l5-l7-l13"),
RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
@@ -947,7 +979,7 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
@@ -960,7 +992,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
@@ -988,7 +1020,7 @@ static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo, "vdd-l5-l16-l17-l18-l19"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
@@ -1012,7 +1044,7 @@ static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
@@ -1039,7 +1071,7 @@ static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
RPMH_VREG("ldo14", "ldo%s14", &pmic5_nldo, "vdd-l14"),
RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo, "vdd-l15"),
RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l16"),
- {},
+ {}
};
static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
@@ -1070,6 +1102,7 @@ static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
RPMH_VREG("ldo18", "ldo%s18", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
RPMH_VREG("ldo19", "ldo%s19", &pmic5_pldo_lv, "vdd-l11-l17-l18-l19"),
+ {}
};
static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
@@ -1083,6 +1116,7 @@ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l5-l6"),
RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-bob"),
+ {}
};
static int rpmh_regulator_probe(struct platform_device *pdev)
@@ -1176,6 +1210,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm8150l_vreg_data,
},
{
+ .compatible = "qcom,pmm8155au-rpmh-regulators",
+ .data = pmm8155au_vreg_data,
+ },
+ {
.compatible = "qcom,pmx55-rpmh-regulators",
.data = pmx55_vreg_data,
},
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index bb944ee5fe3b..198fcc6551f6 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -251,6 +251,50 @@ static const struct regulator_desc pma8084_switch = {
.ops = &rpm_switch_ops,
};
+static const struct regulator_desc pm8226_hfsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1575000, 96, 158, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 159,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_ftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
+ REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 262,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_pldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
+ },
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_nldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 64,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8226_switch = {
+ .ops = &rpm_switch_ops,
+};
+
static const struct regulator_desc pm8x41_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
@@ -405,8 +449,8 @@ static const struct regulator_desc pm8950_pldo = {
static const struct regulator_desc pm8953_lnldo = {
.linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(1380000, 8, 15, 120000),
REGULATOR_LINEAR_RANGE(690000, 0, 7, 60000),
+ REGULATOR_LINEAR_RANGE(1380000, 8, 15, 120000),
},
.n_linear_ranges = 2,
.n_voltages = 16,
@@ -746,6 +790,44 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
@@ -1092,6 +1174,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
+ { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 95677c51c1fa..41424a3366d0 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -595,11 +595,15 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
return regulator_enable_regmap(rdev);
}
-static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
+static int spmi_regulator_vs_ocp(struct regulator_dev *rdev, int lim_uA,
+ int severity, bool enable)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
u8 reg = SPMI_VS_OCP_OVERRIDE;
+ if (lim_uA || !enable || severity != REGULATOR_SEVERITY_PROT)
+ return -EINVAL;
+
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
index 457788b50572..2e627c2b6c51 100644
--- a/drivers/regulator/qcom_usb_vbus-regulator.c
+++ b/drivers/regulator/qcom_usb_vbus-regulator.c
@@ -16,13 +16,21 @@
#define CMD_OTG 0x40
#define OTG_EN BIT(0)
+#define OTG_CURRENT_LIMIT_CFG 0x52
+#define OTG_CURRENT_LIMIT_MASK GENMASK(2, 0)
#define OTG_CFG 0x53
#define OTG_EN_SRC_CFG BIT(1)
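+/*
+ * OTG VBUS current limits in uA, indexed by the OTG_CURRENT_LIMIT_CFG
+ * field value.
+ */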
+static const unsigned int curr_table[] = {
+ 500000, 1000000, 1500000, 2000000, 2500000, 3000000,
+};
+
static const struct regulator_ops qcom_usb_vbus_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
};
static struct regulator_desc qcom_usb_vbus_rdesc = {
@@ -30,6 +38,8 @@ static struct regulator_desc qcom_usb_vbus_rdesc = {
.ops = &qcom_usb_vbus_reg_ops,
.owner = THIS_MODULE,
.type = REGULATOR_VOLTAGE,
+ .curr_table = curr_table,
+ .n_current_limits = ARRAY_SIZE(curr_table),
};
static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
@@ -61,6 +71,8 @@ static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
qcom_usb_vbus_rdesc.enable_reg = base + CMD_OTG;
qcom_usb_vbus_rdesc.enable_mask = OTG_EN;
+ qcom_usb_vbus_rdesc.csel_reg = base + OTG_CURRENT_LIMIT_CFG;
+ qcom_usb_vbus_rdesc.csel_mask = OTG_CURRENT_LIMIT_MASK;
config.dev = dev;
config.init_data = init_data;
config.of_node = dev->of_node;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e926c1a85846..127dc2e2e690 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -158,13 +158,6 @@ struct rk808_regulator_data {
struct gpio_desc *dvs_gpio[2];
};
-static const int rk808_buck_config_regs[] = {
- RK808_BUCK1_CONFIG_REG,
- RK808_BUCK2_CONFIG_REG,
- RK808_BUCK3_CONFIG_REG,
- RK808_BUCK4_CONFIG_REG,
-};
-
static const struct linear_range rk808_ldo3_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
@@ -215,6 +208,15 @@ static const struct linear_range rk817_buck3_voltage_ranges[] = {
RK817_BUCK3_SEL_CNT, RK817_BUCK1_STP1),
};
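+/* RK808 buck1/2 ramp rates in uV/us, matching the RAMP_RATE field encoding */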
+static const unsigned int rk808_buck1_2_ramp_table[] = {
+ 2000, 4000, 6000, 10000
+};
+
+/* RK817/RK809 buck ramp rates in uV/us */
+static const unsigned int rk817_buck1_4_ramp_table[] = {
+ 3000, 6300, 12500, 25000
+};
+
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
@@ -340,62 +342,6 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
return regulator_set_voltage_time_sel(rdev, old_selector, new_selector);
}
-static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
- unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
-
- switch (ramp_delay) {
- case 1 ... 2000:
- ramp_value = RK808_RAMP_RATE_2MV_PER_US;
- break;
- case 2001 ... 4000:
- ramp_value = RK808_RAMP_RATE_4MV_PER_US;
- break;
- case 4001 ... 6000:
- ramp_value = RK808_RAMP_RATE_6MV_PER_US;
- break;
- case 6001 ... 10000:
- break;
- default:
- pr_warn("%s ramp_delay: %d not supported, setting 10000\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(rdev->regmap, reg,
- RK808_RAMP_RATE_MASK, ramp_value);
-}
-
-/*
- * RK817 RK809
- */
-static int rk817_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
-{
- unsigned int ramp_value = RK817_RAMP_RATE_25MV_PER_US;
- unsigned int reg = RK817_BUCK_CONFIG_REG(rdev_get_id(rdev));
-
- switch (ramp_delay) {
- case 0 ... 3000:
- ramp_value = RK817_RAMP_RATE_3MV_PER_US;
- break;
- case 3001 ... 6300:
- ramp_value = RK817_RAMP_RATE_6_3MV_PER_US;
- break;
- case 6301 ... 12500:
- ramp_value = RK817_RAMP_RATE_12_5MV_PER_US;
- break;
- case 12501 ... 25000:
- break;
- default:
- dev_warn(&rdev->dev,
- "%s ramp_delay: %d not supported, setting 25000\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(rdev->regmap, reg,
- RK817_RAMP_RATE_MASK, ramp_value);
-}
-
static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
@@ -625,7 +571,7 @@ static const struct regulator_ops rk808_buck1_2_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
- .set_ramp_delay = rk808_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_voltage = rk808_set_suspend_voltage,
.set_suspend_enable = rk808_set_suspend_enable,
.set_suspend_disable = rk808_set_suspend_disable,
@@ -722,7 +668,7 @@ static const struct regulator_ops rk817_buck_ops_range = {
.set_mode = rk8xx_set_mode,
.get_mode = rk8xx_get_mode,
.set_suspend_mode = rk8xx_set_suspend_mode,
- .set_ramp_delay = rk817_set_ramp_delay,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_voltage = rk808_set_suspend_voltage_range,
.set_suspend_enable = rk817_set_suspend_enable,
.set_suspend_disable = rk817_set_suspend_disable,
@@ -814,6 +760,10 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
.enable_mask = BIT(0),
+ .ramp_reg = RK808_BUCK1_CONFIG_REG,
+ .ramp_mask = RK808_RAMP_RATE_MASK,
+ .ramp_delay_table = rk808_buck1_2_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk808_buck1_2_ramp_table),
.owner = THIS_MODULE,
}, {
.name = "DCDC_REG2",
@@ -830,6 +780,10 @@ static const struct regulator_desc rk808_reg[] = {
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
.enable_mask = BIT(1),
+ .ramp_reg = RK808_BUCK2_CONFIG_REG,
+ .ramp_mask = RK808_RAMP_RATE_MASK,
+ .ramp_delay_table = rk808_buck1_2_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk808_buck1_2_ramp_table),
.owner = THIS_MODULE,
}, {
.name = "DCDC_REG3",
@@ -910,6 +864,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC1),
.enable_val = ENABLE_MASK(RK817_ID_DCDC1),
.disable_val = DISABLE_VAL(RK817_ID_DCDC1),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC1),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -929,6 +887,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC2),
.enable_val = ENABLE_MASK(RK817_ID_DCDC2),
.disable_val = DISABLE_VAL(RK817_ID_DCDC2),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC2),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -948,6 +910,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC3),
.enable_val = ENABLE_MASK(RK817_ID_DCDC3),
.disable_val = DISABLE_VAL(RK817_ID_DCDC3),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC3),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -967,6 +933,10 @@ static const struct regulator_desc rk809_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC4),
.enable_val = ENABLE_MASK(RK817_ID_DCDC4),
.disable_val = DISABLE_VAL(RK817_ID_DCDC4),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC4),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
},
@@ -1052,6 +1022,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC1),
.enable_val = ENABLE_MASK(RK817_ID_DCDC1),
.disable_val = DISABLE_VAL(RK817_ID_DCDC1),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC1),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -1071,6 +1045,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC2),
.enable_val = ENABLE_MASK(RK817_ID_DCDC2),
.disable_val = DISABLE_VAL(RK817_ID_DCDC2),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC2),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -1090,6 +1068,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC3),
.enable_val = ENABLE_MASK(RK817_ID_DCDC3),
.disable_val = DISABLE_VAL(RK817_ID_DCDC3),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC3),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
}, {
@@ -1109,6 +1091,10 @@ static const struct regulator_desc rk817_reg[] = {
.enable_mask = ENABLE_MASK(RK817_ID_DCDC4),
.enable_val = ENABLE_MASK(RK817_ID_DCDC4),
.disable_val = DISABLE_VAL(RK817_ID_DCDC4),
+ .ramp_reg = RK817_BUCK_CONFIG_REG(RK817_ID_DCDC4),
+ .ramp_mask = RK817_RAMP_RATE_MASK,
+ .ramp_delay_table = rk817_buck1_4_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(rk817_buck1_4_ramp_table),
.of_map_mode = rk8xx_regulator_of_map_mode,
.owner = THIS_MODULE,
},
diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
index 2055a9cb13ba..7a87788d3f09 100644
--- a/drivers/regulator/rt4801-regulator.c
+++ b/drivers/regulator/rt4801-regulator.c
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
struct gpio_descs *gpios = priv->enable_gpios;
int id = rdev_get_id(rdev), ret;
- if (gpios->ndescs <= id) {
+ if (!gpios || gpios->ndescs <= id) {
dev_warn(&rdev->dev, "no dedicated gpio can control\n");
goto bypass_gpio;
}
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
struct gpio_descs *gpios = priv->enable_gpios;
int id = rdev_get_id(rdev);
- if (gpios->ndescs <= id) {
+ if (!gpios || gpios->ndescs <= id) {
dev_warn(&rdev->dev, "no dedicated gpio can control\n");
goto bypass_gpio;
}
diff --git a/drivers/regulator/rt4831-regulator.c b/drivers/regulator/rt4831-regulator.c
index e3aaac90d238..676b0419e48f 100644
--- a/drivers/regulator/rt4831-regulator.c
+++ b/drivers/regulator/rt4831-regulator.c
@@ -108,6 +108,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
.bypass_reg = RT4831_REG_DSVEN,
.bypass_val_on = DSV_MODE_BYPASS,
.bypass_val_off = DSV_MODE_NORMAL,
+ .owner = THIS_MODULE,
},
{
.name = "DSVP",
@@ -125,6 +126,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
.enable_mask = RT4831_POSEN_MASK,
.active_discharge_reg = RT4831_REG_DSVEN,
.active_discharge_mask = RT4831_POSADEN_MASK,
+ .owner = THIS_MODULE,
},
{
.name = "DSVN",
@@ -142,6 +144,7 @@ static const struct regulator_desc rt4831_regulator_descs[] = {
.enable_mask = RT4831_NEGEN_MASK,
.active_discharge_reg = RT4831_REG_DSVEN,
.active_discharge_mask = RT4831_NEGADEN_MASK,
+ .owner = THIS_MODULE,
}
};
diff --git a/drivers/regulator/rt6160-regulator.c b/drivers/regulator/rt6160-regulator.c
new file mode 100644
index 000000000000..5d7b0e7ad69a
--- /dev/null
+++ b/drivers/regulator/rt6160-regulator.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT6160_MODE_AUTO 0
+#define RT6160_MODE_FPWM 1
+
+#define RT6160_REG_CNTL 0x01
+#define RT6160_REG_STATUS 0x02
+#define RT6160_REG_DEVID 0x03
+#define RT6160_REG_VSELL 0x04
+#define RT6160_REG_VSELH 0x05
+#define RT6160_NUM_REGS (RT6160_REG_VSELH + 1)
+
+#define RT6160_FPWM_MASK BIT(3)
+#define RT6160_RAMPRATE_MASK GENMASK(1, 0)
+#define RT6160_VID_MASK GENMASK(7, 4)
+#define RT6160_VSEL_MASK GENMASK(6, 0)
+#define RT6160_HDSTAT_MASK BIT(4)
+#define RT6160_UVSTAT_MASK BIT(3)
+#define RT6160_OCSTAT_MASK BIT(2)
+#define RT6160_TSDSTAT_MASK BIT(1)
+#define RT6160_PGSTAT_MASK BIT(0)
+
+#define RT6160_VENDOR_ID 0xA0
+#define RT6160_VOUT_MINUV 2025000
+#define RT6160_VOUT_MAXUV 5200000
+#define RT6160_VOUT_STPUV 25000
+#define RT6160_N_VOUTS ((RT6160_VOUT_MAXUV - RT6160_VOUT_MINUV) / RT6160_VOUT_STPUV + 1)
+
+#define RT6160_I2CRDY_TIMEUS 100
+
+struct rt6160_priv {
+ struct regulator_desc desc;
+ struct gpio_desc *enable_gpio;
+ struct regmap *regmap;
+ bool enable_state;
+};
+
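+/*
+ * Ramp rates in uV/us; the index is assumed to correspond to the
+ * RAMPRATE field value in the CNTL register.
+ */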
+static const unsigned int rt6160_ramp_tables[] = {
+ 1000, 2500, 5000, 10000
+};
+
+static int rt6160_enable(struct regulator_dev *rdev)
+{
+ struct rt6160_priv *priv = rdev_get_drvdata(rdev);
+
+ if (!priv->enable_gpio)
+ return 0;
+
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ priv->enable_state = true;
+
+ usleep_range(RT6160_I2CRDY_TIMEUS, RT6160_I2CRDY_TIMEUS + 100);
+
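+ /* Re-enable cache writes and sync the cached registers back to hardware */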
+ regcache_cache_only(priv->regmap, false);
+ return regcache_sync(priv->regmap);
+}
+
+static int rt6160_disable(struct regulator_dev *rdev)
+{
+ struct rt6160_priv *priv = rdev_get_drvdata(rdev);
+
+ if (!priv->enable_gpio)
+ return -EINVAL;
+
+ /* Switch to cache-only and mark the cache dirty before disabling the hardware */
+ regcache_cache_only(priv->regmap, true);
+ regcache_mark_dirty(priv->regmap);
+
+ priv->enable_state = false;
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+
+ return 0;
+}
+
+static int rt6160_is_enabled(struct regulator_dev *rdev)
+{
+ struct rt6160_priv *priv = rdev_get_drvdata(rdev);
+
+ return priv->enable_state ? 1 : 0;
+}
+
+static int rt6160_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int mode_val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ mode_val = RT6160_FPWM_MASK;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ mode_val = 0;
+ break;
+ default:
+ dev_err(&rdev->dev, "mode not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(regmap, RT6160_REG_CNTL, RT6160_FPWM_MASK, mode_val);
+}
+
+static unsigned int rt6160_get_mode(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, RT6160_REG_CNTL, &val);
+ if (ret)
+ return ret;
+
+ if (val & RT6160_FPWM_MASK)
+ return REGULATOR_MODE_FAST;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int rt6160_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int suspend_vsel_reg;
+ int vsel;
+
+ vsel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (vsel < 0)
+ return vsel;
+
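+ /* The suspend voltage goes to whichever VSEL register is not the active one */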
+ if (rdev->desc->vsel_reg == RT6160_REG_VSELL)
+ suspend_vsel_reg = RT6160_REG_VSELH;
+ else
+ suspend_vsel_reg = RT6160_REG_VSELL;
+
+ return regmap_update_bits(regmap, suspend_vsel_reg,
+ RT6160_VSEL_MASK, vsel);
+}
+
+static int rt6160_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
+{
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int val, events = 0;
+ int ret;
+
+ ret = regmap_read(regmap, RT6160_REG_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (val & (RT6160_HDSTAT_MASK | RT6160_TSDSTAT_MASK))
+ events |= REGULATOR_ERROR_OVER_TEMP;
+
+ if (val & RT6160_UVSTAT_MASK)
+ events |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+ if (val & RT6160_OCSTAT_MASK)
+ events |= REGULATOR_ERROR_OVER_CURRENT;
+
+ if (val & RT6160_PGSTAT_MASK)
+ events |= REGULATOR_ERROR_FAIL;
+
+ *flags = events;
+ return 0;
+}
+
+static const struct regulator_ops rt6160_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+
+ .enable = rt6160_enable,
+ .disable = rt6160_disable,
+ .is_enabled = rt6160_is_enabled,
+
+ .set_mode = rt6160_set_mode,
+ .get_mode = rt6160_get_mode,
+ .set_suspend_voltage = rt6160_set_suspend_voltage,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .get_error_flags = rt6160_get_error_flags,
+};
+
+static unsigned int rt6160_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RT6160_MODE_FPWM:
+ return REGULATOR_MODE_FAST;
+ case RT6160_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ }
+
+ return REGULATOR_MODE_INVALID;
+}
+
+static bool rt6160_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= RT6160_REG_CNTL && reg <= RT6160_REG_VSELH)
+ return true;
+ return false;
+}
+
+static bool rt6160_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == RT6160_REG_STATUS)
+ return true;
+ return false;
+}
+
+static const struct regmap_config rt6160_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT6160_REG_VSELH,
+ .num_reg_defaults_raw = RT6160_NUM_REGS,
+ .cache_type = REGCACHE_FLAT,
+
+ .writeable_reg = rt6160_is_accessible_reg,
+ .readable_reg = rt6160_is_accessible_reg,
+ .volatile_reg = rt6160_is_volatile_reg,
+};
+
+static int rt6160_probe(struct i2c_client *i2c)
+{
+ struct rt6160_priv *priv;
+ struct regulator_config regulator_cfg = {};
+ struct regulator_dev *rdev;
+ bool vsel_active_low;
+ unsigned int devid;
+ int ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vsel_active_low =
+ device_property_present(&i2c->dev, "richtek,vsel-active-low");
+
+ priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio)) {
+ dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+ return PTR_ERR(priv->enable_gpio);
+ }
+ priv->enable_state = true;
+
+ usleep_range(RT6160_I2CRDY_TIMEUS, RT6160_I2CRDY_TIMEUS + 100);
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &rt6160_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&i2c->dev, "Failed to init regmap (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(priv->regmap, RT6160_REG_DEVID, &devid);
+ if (ret)
+ return ret;
+
+ if ((devid & RT6160_VID_MASK) != RT6160_VENDOR_ID) {
+ dev_err(&i2c->dev, "VID not correct [0x%02x]\n", devid);
+ return -ENODEV;
+ }
+
+ priv->desc.name = "rt6160-buckboost";
+ priv->desc.type = REGULATOR_VOLTAGE;
+ priv->desc.owner = THIS_MODULE;
+ priv->desc.min_uV = RT6160_VOUT_MINUV;
+ priv->desc.uV_step = RT6160_VOUT_STPUV;
+ if (vsel_active_low)
+ priv->desc.vsel_reg = RT6160_REG_VSELL;
+ else
+ priv->desc.vsel_reg = RT6160_REG_VSELH;
+ priv->desc.vsel_mask = RT6160_VSEL_MASK;
+ priv->desc.n_voltages = RT6160_N_VOUTS;
+ priv->desc.ramp_reg = RT6160_REG_CNTL;
+ priv->desc.ramp_mask = RT6160_RAMPRATE_MASK;
+ priv->desc.ramp_delay_table = rt6160_ramp_tables;
+ priv->desc.n_ramp_values = ARRAY_SIZE(rt6160_ramp_tables);
+ priv->desc.of_map_mode = rt6160_of_map_mode;
+ priv->desc.ops = &rt6160_regulator_ops;
+
+ regulator_cfg.dev = &i2c->dev;
+ regulator_cfg.of_node = i2c->dev.of_node;
+ regulator_cfg.regmap = priv->regmap;
+ regulator_cfg.driver_data = priv;
+ regulator_cfg.init_data = of_get_regulator_init_data(&i2c->dev, i2c->dev.of_node,
+ &priv->desc);
+
+ rdev = devm_regulator_register(&i2c->dev, &priv->desc, &regulator_cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt6160_of_match_table[] = {
+ { .compatible = "richtek,rt6160", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt6160_of_match_table);
+
+static struct i2c_driver rt6160_driver = {
+ .driver = {
+ .name = "rt6160",
+ .of_match_table = rt6160_of_match_table,
+ },
+ .probe_new = rt6160_probe,
+};
+module_i2c_driver(rt6160_driver);
+
+MODULE_DESCRIPTION("Richtek RT6160 voltage regulator driver");
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL v2");
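
A quick arithmetic check on the RT6160 linear range above (an illustrative sketch, not part of the patch): with a 2025000 uV minimum, a 25000 uV step and a 5200000 uV maximum, RT6160_N_VOUTS works out to 128 selectors, which exactly fills the 7-bit VSEL field masked by GENMASK(6, 0).

#include <assert.h>

int main(void)
{
	const int min_uv = 2025000, max_uv = 5200000, step_uv = 25000;
	/* Same arithmetic as the RT6160_N_VOUTS macro in the driver */
	int n_vouts = (max_uv - min_uv) / step_uv + 1;

	assert(n_vouts == 128);			/* fits the 7-bit VSEL field */
	assert(min_uv + (n_vouts - 1) * step_uv == max_uv);
	return 0;
}
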
diff --git a/drivers/regulator/rt6245-regulator.c b/drivers/regulator/rt6245-regulator.c
new file mode 100644
index 000000000000..d3299a72fd10
--- /dev/null
+++ b/drivers/regulator/rt6245-regulator.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define RT6245_VIRT_OCLIMIT 0x00
+#define RT6245_VIRT_OTLEVEL 0x01
+#define RT6245_VIRT_PGDLYTIME 0x02
+#define RT6245_VIRT_SLEWRATE 0x03
+#define RT6245_VIRT_SWFREQ 0x04
+#define RT6245_VIRT_VOUT 0x05
+
+#define RT6245_VOUT_MASK GENMASK(6, 0)
+#define RT6245_SLEW_MASK GENMASK(2, 0)
+#define RT6245_CHKSUM_MASK BIT(7)
+#define RT6245_CODE_MASK GENMASK(6, 0)
+
+/* HW Enable + Soft start time */
+#define RT6245_ENTIME_IN_US 5000
+
+#define RT6245_VOUT_MINUV 437500
+#define RT6245_VOUT_MAXUV 1387500
+#define RT6245_VOUT_STEPUV 12500
+#define RT6245_NUM_VOUT ((RT6245_VOUT_MAXUV - RT6245_VOUT_MINUV) / RT6245_VOUT_STEPUV + 1)
+
+struct rt6245_priv {
+ struct gpio_desc *enable_gpio;
+ bool enable_state;
+};
+
+static int rt6245_enable(struct regulator_dev *rdev)
+{
+ struct rt6245_priv *priv = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int ret;
+
+ if (!priv->enable_gpio)
+ return 0;
+
+ gpiod_direction_output(priv->enable_gpio, 1);
+ usleep_range(RT6245_ENTIME_IN_US, RT6245_ENTIME_IN_US + 1000);
+
+ regcache_cache_only(regmap, false);
+ ret = regcache_sync(regmap);
+ if (ret)
+ return ret;
+
+ priv->enable_state = true;
+ return 0;
+}
+
+static int rt6245_disable(struct regulator_dev *rdev)
+{
+ struct rt6245_priv *priv = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev_get_regmap(rdev);
+
+ if (!priv->enable_gpio)
+ return -EINVAL;
+
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
+
+ gpiod_direction_output(priv->enable_gpio, 0);
+
+ priv->enable_state = false;
+ return 0;
+}
+
+static int rt6245_is_enabled(struct regulator_dev *rdev)
+{
+ struct rt6245_priv *priv = rdev_get_drvdata(rdev);
+
+ return priv->enable_state ? 1 : 0;
+}
+
+static const struct regulator_ops rt6245_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .enable = rt6245_enable,
+ .disable = rt6245_disable,
+ .is_enabled = rt6245_is_enabled,
+};
+
+/* ramp delay dividend is 12500 uV/us, and the divisor ranges from 1 to 8 */
+static const unsigned int rt6245_ramp_delay_table[] = {
+ 12500, 6250, 4167, 3125, 2500, 2083, 1786, 1562
+};
+
+static const struct regulator_desc rt6245_regulator_desc = {
+ .name = "rt6245-regulator",
+ .ops = &rt6245_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = RT6245_VOUT_MINUV,
+ .uV_step = RT6245_VOUT_STEPUV,
+ .n_voltages = RT6245_NUM_VOUT,
+ .ramp_delay_table = rt6245_ramp_delay_table,
+ .n_ramp_values = ARRAY_SIZE(rt6245_ramp_delay_table),
+ .owner = THIS_MODULE,
+ .vsel_reg = RT6245_VIRT_VOUT,
+ .vsel_mask = RT6245_VOUT_MASK,
+ .ramp_reg = RT6245_VIRT_SLEWRATE,
+ .ramp_mask = RT6245_SLEW_MASK,
+};
+
+static int rt6245_init_device_properties(struct device *dev)
+{
+ const struct {
+ const char *name;
+ unsigned int reg;
+ } rt6245_props[] = {
+ { "richtek,oc-level-select", RT6245_VIRT_OCLIMIT },
+ { "richtek,ot-level-select", RT6245_VIRT_OTLEVEL },
+ { "richtek,pgdly-time-select", RT6245_VIRT_PGDLYTIME },
+ { "richtek,switch-freq-select", RT6245_VIRT_SWFREQ }
+ };
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ u8 propval;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(rt6245_props); i++) {
+ ret = device_property_read_u8(dev, rt6245_props[i].name, &propval);
+ if (ret)
+ continue;
+
+ ret = regmap_write(regmap, rt6245_props[i].reg, propval);
+ if (ret) {
+ dev_err(dev, "Fail to apply [%s:%d]\n", rt6245_props[i].name, propval);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rt6245_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *i2c = context;
+ const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
+ unsigned int code, bit_count;
+
+ code = func_base[reg];
+ code += val;
+
+ /* xor checksum for bit 6 to 0 */
+ bit_count = hweight8(code & RT6245_CODE_MASK);
+ if (bit_count % 2)
+ code |= RT6245_CHKSUM_MASK;
+ else
+ code &= ~RT6245_CHKSUM_MASK;
+
+ return i2c_smbus_write_byte(i2c, code);
+}
+
+static const struct reg_default rt6245_reg_defaults[] = {
+ /* Default over-current limit 14 A */
+ { RT6245_VIRT_OCLIMIT, 2 },
+ /* Default over-temperature threshold 150 degC */
+ { RT6245_VIRT_OTLEVEL, 0 },
+ /* Default power-good delay time 10 us */
+ { RT6245_VIRT_PGDLYTIME, 1 },
+ /* Default slew rate 12.5 mV/us */
+ { RT6245_VIRT_SLEWRATE, 0 },
+ /* Default switching frequency 800 kHz */
+ { RT6245_VIRT_SWFREQ, 1 },
+ /* Default voltage 750mV */
+ { RT6245_VIRT_VOUT, 0x19 }
+};
+
+static const struct regmap_config rt6245_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT6245_VIRT_VOUT,
+ .cache_type = REGCACHE_FLAT,
+ .reg_defaults = rt6245_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(rt6245_reg_defaults),
+ .reg_write = rt6245_reg_write,
+};
+
+static int rt6245_probe(struct i2c_client *i2c)
+{
+ struct rt6245_priv *priv;
+ struct regmap *regmap;
+ struct regulator_config regulator_cfg = {};
+ struct regulator_dev *rdev;
+ int ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->enable_state = true;
+
+ priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio)) {
+ dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+ return PTR_ERR(priv->enable_gpio);
+ }
+
+ usleep_range(RT6245_ENTIME_IN_US, RT6245_ENTIME_IN_US + 1000);
+
+ regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt6245_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to initialize the regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ ret = rt6245_init_device_properties(&i2c->dev);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to initialize device properties\n");
+ return ret;
+ }
+
+ regulator_cfg.dev = &i2c->dev;
+ regulator_cfg.of_node = i2c->dev.of_node;
+ regulator_cfg.regmap = regmap;
+ regulator_cfg.driver_data = priv;
+ regulator_cfg.init_data = of_get_regulator_init_data(&i2c->dev, i2c->dev.of_node,
+ &rt6245_regulator_desc);
+ rdev = devm_regulator_register(&i2c->dev, &rt6245_regulator_desc, &regulator_cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt6245_of_match_table[] = {
+ { .compatible = "richtek,rt6245", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt6245_of_match_table);
+
+static struct i2c_driver rt6245_driver = {
+ .driver = {
+ .name = "rt6245",
+ .of_match_table = rt6245_of_match_table,
+ },
+ .probe_new = rt6245_probe,
+};
+module_i2c_driver(rt6245_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT6245 Regulator Driver");
+MODULE_LICENSE("GPL v2");
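
The RT6245 regmap above routes every register write through rt6245_reg_write(), which adds the value to a per-register base code and folds an odd-parity checksum into bit 7 before sending a single SMBus byte. The following standalone sketch reproduces that encoding in plain C for illustration; hweight8() is replaced by a local popcount, and the base codes and defaults are taken from the tables in the new file.

#include <assert.h>
#include <stdint.h>

/* Popcount of bits 6..0, standing in for hweight8(code & RT6245_CODE_MASK) */
static unsigned int parity7(uint8_t code)
{
	unsigned int bits = 0;
	uint8_t mask;

	for (mask = 0x01; mask && mask <= 0x40; mask <<= 1)
		bits += !!(code & mask);
	return bits;
}

/* Same scheme as rt6245_reg_write(): base code + value, odd-parity bit 7 */
static uint8_t rt6245_encode(uint8_t base, uint8_t val)
{
	uint8_t code = base + val;

	if (parity7(code) % 2)
		code |= 0x80;
	else
		code &= (uint8_t)~0x80;
	return code;
}

int main(void)
{
	/* VOUT (base 0x00), default selector 0x19 (750 mV) -> 0x99 on the wire */
	assert(rt6245_encode(0x00, 0x19) == 0x99);
	/* OC limit (base 0x6F), default 2 -> 0x71; parity already even */
	assert(rt6245_encode(0x6F, 0x02) == 0x71);
	return 0;
}
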
diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
index 852fb2596ffd..4bca64de0f67 100644
--- a/drivers/regulator/rtmv20-regulator.c
+++ b/drivers/regulator/rtmv20-regulator.c
@@ -27,6 +27,7 @@
#define RTMV20_REG_LDIRQ 0x30
#define RTMV20_REG_LDSTAT 0x40
#define RTMV20_REG_LDMASK 0x50
+#define RTMV20_MAX_REGS (RTMV20_REG_LDMASK + 1)
#define RTMV20_VID_MASK GENMASK(7, 4)
#define RICHTEK_VID 0x80
@@ -103,9 +104,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
return 0;
}
+static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ int sel;
+
+ if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
+ return -EINVAL;
+
+ if (max_uA > RTMV20_LSW_MAXUA)
+ max_uA = RTMV20_LSW_MAXUA;
+
+ sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
+
+ /* Ensure the selected setting is still in range */
+ if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+ rdev->desc->csel_mask, sel);
+}
+
+static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+ if (ret)
+ return ret;
+
+ val &= rdev->desc->csel_mask;
+ val >>= ffs(rdev->desc->csel_mask) - 1;
+
+ return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
+}
+
static const struct regulator_ops rtmv20_regulator_ops = {
- .set_current_limit = regulator_set_current_limit_regmap,
- .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = rtmv20_lsw_set_current_limit,
+ .get_current_limit = rtmv20_lsw_get_current_limit,
.enable = rtmv20_lsw_enable,
.disable = rtmv20_lsw_disable,
.is_enabled = regulator_is_enabled_regmap,
@@ -275,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
.max_register = RTMV20_REG_LDMASK,
+ .num_reg_defaults_raw = RTMV20_MAX_REGS,
.writeable_reg = rtmv20_is_accessible_reg,
.readable_reg = rtmv20_is_accessible_reg,
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index bbadf72b94e8..1f02f60ad136 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -173,7 +173,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
sreg->desc.uV_step =
vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
sreg->desc.linear_min_sel = 0;
- sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
+ sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
sreg->desc.ops = &scmi_reg_linear_ops;
}
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index cf10fdb72e32..2d7597c76e4a 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -32,7 +32,8 @@ struct stpmic1_regulator_cfg {
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode);
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev);
-static int stpmic1_set_icc(struct regulator_dev *rdev);
+static int stpmic1_set_icc(struct regulator_dev *rdev, int lim, int severity,
+ bool enable);
static unsigned int stpmic1_map_mode(unsigned int mode);
enum {
@@ -491,11 +492,26 @@ static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
STPMIC1_BUCK_MODE_LP, value);
}
-static int stpmic1_set_icc(struct regulator_dev *rdev)
+static int stpmic1_set_icc(struct regulator_dev *rdev, int lim, int severity,
+ bool enable)
{
struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev);
struct regmap *regmap = rdev_get_regmap(rdev);
+ /*
+ * A single register bit appears to control whether OCP is enabled,
+ * so it could probably also be turned off here when that is
+ * requested. This is left unimplemented because no hardware is
+ * available to test it; feel free to add it if you have the HW and
+ * need the kernel to disable OCP.
+ *
+ * It is also unclear whether the limit is configurable or whether
+ * error/warning severities are supported instead of protect, so
+ * keep the existing logic and assume they are not.
+ */
+ if (lim || severity != REGULATOR_SEVERITY_PROT || !enable)
+ return -EINVAL;
+
/* enable switch off in case of over current */
return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask,
cfg->icc_mask);
diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
new file mode 100644
index 000000000000..e021ae08cbaa
--- /dev/null
+++ b/drivers/regulator/sy7636a-regulator.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Functions to access SY7636A power management chip voltages
+//
+// Copyright (C) 2019 reMarkable AS - http://www.remarkable.com/
+//
+// Authors: Lars Ivar Miljeteig <lars.ivar.miljeteig@remarkable.com>
+// Alistair Francis <alistair@alistair23.me>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/sy7636a.h>
+
+#define SY7636A_POLL_ENABLED_TIME 500
+
+static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val, val_h;
+
+ ret = regmap_read(rdev->regmap, SY7636A_REG_VCOM_ADJUST_CTRL_L, &val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(rdev->regmap, SY7636A_REG_VCOM_ADJUST_CTRL_H, &val_h);
+ if (ret)
+ return ret;
+
+ val |= (val_h << VCOM_ADJUST_CTRL_SHIFT);
+
+ return (val & VCOM_ADJUST_CTRL_MASK) * VCOM_ADJUST_CTRL_SCAL;
+}
+
+static int sy7636a_get_status(struct regulator_dev *rdev)
+{
+ struct sy7636a *sy7636a = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ ret = gpiod_get_value_cansleep(sy7636a->pgood_gpio);
+ if (ret < 0)
+ dev_err(&rdev->dev, "Failed to read pgood gpio: %d\n", ret);
+
+ return ret;
+}
+
+static const struct regulator_ops sy7636a_vcom_volt_ops = {
+ .get_voltage = sy7636a_get_vcom_voltage_op,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = sy7636a_get_status,
+};
+
+static const struct regulator_desc desc = {
+ .name = "vcom",
+ .id = 0,
+ .ops = &sy7636a_vcom_volt_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = SY7636A_REG_OPERATION_MODE_CRL,
+ .enable_mask = SY7636A_OPERATION_MODE_CRL_ONOFF,
+ .poll_enabled_time = SY7636A_POLL_ENABLED_TIME,
+ .regulators_node = of_match_ptr("regulators"),
+ .of_match = of_match_ptr("vcom"),
+};
+
+static int sy7636a_regulator_probe(struct platform_device *pdev)
+{
+ struct sy7636a *sy7636a = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct gpio_desc *gdp;
+ int ret;
+
+ if (!sy7636a)
+ return -EPROBE_DEFER;
+
+ platform_set_drvdata(pdev, sy7636a);
+
+ gdp = devm_gpiod_get(sy7636a->dev, "epd-pwr-good", GPIOD_IN);
+ if (IS_ERR(gdp)) {
+ dev_err(sy7636a->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ return PTR_ERR(gdp);
+ }
+
+ sy7636a->pgood_gpio = gdp;
+
+ ret = regmap_write(sy7636a->regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
+ if (ret) {
+ dev_err(sy7636a->dev, "Failed to initialize regulator: %d\n", ret);
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.dev->of_node = sy7636a->dev->of_node;
+ config.driver_data = sy7636a;
+ config.regmap = sy7636a->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(sy7636a->dev, "Failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id sy7636a_regulator_id_table[] = {
+ { "sy7636a-regulator", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sy7636a_regulator_id_table);
+
+static struct platform_driver sy7636a_regulator_driver = {
+ .driver = {
+ .name = "sy7636a-regulator",
+ },
+ .probe = sy7636a_regulator_probe,
+ .id_table = sy7636a_regulator_id_table,
+};
+module_platform_driver(sy7636a_regulator_driver);
+
+MODULE_AUTHOR("Lars Ivar Miljeteig <lars.ivar.miljeteig@remarkable.com>");
+MODULE_DESCRIPTION("SY7636A voltage regulator driver");
+MODULE_LICENSE("GPL v2");
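
The sy7636a get_voltage op above stitches the VCOM setting together from a low byte plus a high register shifted up, then scales the result to microvolts. A minimal sketch of that combine step follows; the shift, mask and scale constants here are placeholders for illustration only, since the real definitions live in <linux/mfd/sy7636a.h>.

#include <assert.h>
#include <stdint.h>

/* Placeholder constants; the real definitions live in <linux/mfd/sy7636a.h> */
#define VCOM_SHIFT	8
#define VCOM_MASK	0x01ff
#define VCOM_SCALE_UV	10000

static int vcom_uv(uint8_t lo, uint8_t hi)
{
	unsigned int val = lo | ((unsigned int)hi << VCOM_SHIFT);

	return (val & VCOM_MASK) * VCOM_SCALE_UV;
}

int main(void)
{
	/* lo = 0x96, hi = 0x01 -> 0x196 = 406 steps -> 4.06 V with these assumed values */
	assert(vcom_uv(0x96, 0x01) == 4060000);
	return 0;
}
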
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 2e02e26b516c..e75b0973e325 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -201,6 +201,7 @@ static const struct of_device_id uniphier_regulator_match[] = {
},
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
static struct platform_driver uniphier_regulator_driver = {
.probe = uniphier_regulator_probe,
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 8e3b5a67cfd8..8ca28664776e 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -29,15 +29,15 @@ struct userspace_consumer_data {
struct regulator_bulk_data *supplies;
};
-static ssize_t reg_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
-static ssize_t reg_show_state(struct device *dev,
+static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
@@ -48,8 +48,8 @@ static ssize_t reg_show_state(struct device *dev,
return sprintf(buf, "disabled\n");
}
-static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
bool enabled;
@@ -87,8 +87,8 @@ static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(name, 0444, reg_show_name, NULL);
-static DEVICE_ATTR(state, 0644, reg_show_state, reg_set_state);
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RW(state);
static struct attribute *attributes[] = {
&dev_attr_name.attr,
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 6655035e5164..80dc479a6ff0 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -75,7 +75,7 @@ static int __mtk_rtc_read_time(struct mt6397_rtc *rtc,
tm->tm_min = data[RTC_OFFSET_MIN];
tm->tm_hour = data[RTC_OFFSET_HOUR];
tm->tm_mday = data[RTC_OFFSET_DOM];
- tm->tm_mon = data[RTC_OFFSET_MTH];
+ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_TC_MTH_MASK;
tm->tm_year = data[RTC_OFFSET_YEAR];
ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_TC_SEC, sec);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 1b9e1442e6a5..fd42a5fffaed 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
+static int dasd_diag_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
.check_device = dasd_diag_check_device,
- .verify_path = dasd_generic_verify_path,
+ .pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
.setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 4789410885e4..3ad319aee51e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
+static int dasd_fba_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
- .verify_path = dasd_generic_verify_path,
+ .pe_handler = dasd_fba_pe_handler,
.setup_blk_queue = dasd_fba_setup_blk_queue,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1c59b0e86a9f..155428bfed8a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -297,7 +297,6 @@ struct dasd_discipline {
* e.g. verify that new path is compatible with the current
* configuration.
*/
- int (*verify_path)(struct dasd_device *, __u8);
int (*pe_handler)(struct dasd_device *, __u8, __u8);
/*
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index b9febc581b1f..8d1b2771c1aa 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
int ret;
+ /* this is an error in the caller */
+ if (cp->initialized)
+ return -EBUSY;
+
/*
* We only support prefetching the channel program. We assume all channel
* programs executed by supported guests likewise support prefetching.
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 8c625b530035..9b61e9b131ad 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
struct vfio_ccw_private *private;
struct irb *irb;
bool is_final;
+ bool cp_is_finished = false;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
@@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
cp_free(&private->cp);
+ cp_is_finished = true;
+ }
}
mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
- if (private->mdev && is_final)
+ /*
+ * Reset to IDLE only if processing of a channel program
+ * has finished. Do not overwrite a possible processing
+ * state if the final interrupt was for HSCH or CSCH.
+ */
+ if (private->mdev && cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 23e61aa638e4..e435a9cd92da 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
}
err_out:
+ private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
io_region->ret_code, errstr);
}
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 491a64c61fff..c57d2a7f0919 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
}
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
- if (region->ret_code != 0)
- private->state = VFIO_CCW_STATE_IDLE;
ret = (region->ret_code != 0) ? region->ret_code : count;
out_unlock:
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ecefc25eff0c..337353c9655e 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
+ bool found = false;
status = ap_dqap(aq->qid, &aq->reply->psmid,
aq->reply->msg, aq->reply->len);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count--;
+ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
list_del_init(&ap_msg->list);
aq->pendingq_count--;
ap_msg->receive(aq, ap_msg, aq->reply);
+ found = true;
break;
}
+ if (!found) {
+ AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+ __func__, aq->reply->psmid,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ }
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count++;
+ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index b2c7e10dfdcd..122c85c22469 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -366,16 +366,6 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
-
- /*
- * If the KVM pointer is in flux or the guest is running, disallow
- * un-assignment of control domain.
- */
- if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
- mutex_unlock(&matrix_dev->lock);
- return -EBUSY;
- }
-
vfio_ap_mdev_reset_queues(mdev);
list_del(&matrix_mdev->node);
kfree(matrix_mdev);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 3ee46a843cb5..adddcd589941 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2926,11 +2926,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
ccb->datalen = count * sizeof(struct blogic_sg_seg);
if (blogic_multimaster_type(adapter))
- ccb->data = (void *)((unsigned int) ccb->dma_handle +
+ ccb->data = (unsigned int) ccb->dma_handle +
((unsigned long) &ccb->sglist -
- (unsigned long) ccb));
+ (unsigned long) ccb);
else
- ccb->data = ccb->sglist;
+ ccb->data = virt_to_32bit_virt(ccb->sglist);
scsi_for_each_sg(command, sg, count, i) {
ccb->sglist[i].segbytes = sg_dma_len(sg);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index a8e4a19788a7..7d1ec10f2430 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -806,7 +806,7 @@ struct blogic_ccb {
unsigned char cdblen; /* Byte 2 */
unsigned char sense_datalen; /* Byte 3 */
u32 datalen; /* Bytes 4-7 */
- void *data; /* Bytes 8-11 */
+ u32 data; /* Bytes 8-11 */
unsigned char:8; /* Byte 12 */
unsigned char:8; /* Byte 13 */
enum blogic_adapter_status adapter_status; /* Byte 14 */
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 924d55a8acbf..65182ad9cdf8 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -58,7 +58,6 @@
#include "aicasm_symbol.h"
#include "aicasm_insformat.h"
-int yylineno;
char *yyfilename;
char stock_prefix[] = "aic_";
char *prefix = stock_prefix;
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 7bf7fd5953ac..ed3bdd43c297 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -108,7 +108,7 @@ struct macro_arg {
regex_t arg_regex;
char *replacement_text;
};
-STAILQ_HEAD(macro_arg_list, macro_arg) args;
+STAILQ_HEAD(macro_arg_list, macro_arg);
struct macro_info {
struct macro_arg_list args;
diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
index a7515c3039ed..53343a6d8ae1 100644
--- a/drivers/scsi/aic7xxx/scsi_message.h
+++ b/drivers/scsi/aic7xxx/scsi_message.h
@@ -3,6 +3,17 @@
* $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
*/
+/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
+#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
+#define MSG_RESTOREPOINTERS 0x03 /* O/O */
+#define MSG_DISCONNECT 0x04 /* O/O */
+#define MSG_MESSAGE_REJECT 0x07 /* M/M */
+#define MSG_NOOP 0x08 /* M/M */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
+
/* Identify message */ /* M/M */
#define MSG_IDENTIFYFLAG 0x80
#define MSG_IDENTIFY_DISCFLAG 0x40
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 1a0dc18d6915..ed300a279a38 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1220,6 +1220,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
was a result from the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = FAILED;
goto done;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 499c770d405c..e95408314078 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -4811,14 +4811,14 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
int i;
- free_irq(pci_irq_vector(pdev, 1), hisi_hba);
- free_irq(pci_irq_vector(pdev, 2), hisi_hba);
- free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
int nr = hisi_sas_intr_conv ? 16 : 16 + i;
- free_irq(pci_irq_vector(pdev, nr), cq);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 697c09ef259b..cd52664920e1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_enable_async_suspend(&shost->shost_dev);
+ get_device(&shost->shost_gendev);
error = device_add(&shost->shost_dev);
if (error)
goto out_del_gendev;
- get_device(&shost->shost_gendev);
-
if (shost->transportt->host_size) {
shost->shost_data = kzalloc(shost->transportt->host_size,
GFP_KERNEL);
@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (!shost->work_q) {
error = -EINVAL;
- goto out_free_shost_data;
+ goto out_del_dev;
}
}
error = scsi_sysfs_add_host(shost);
if (error)
- goto out_destroy_host;
+ goto out_del_dev;
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
- out_destroy_host:
- if (shost->work_q)
- destroy_workqueue(shost->work_q);
- out_free_shost_data:
- kfree(shost->shost_data);
+ /*
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
+ */
out_del_dev:
device_del(&shost->shost_dev);
out_del_gendev:
+ /*
+ * Host state is SHOST_RUNNING so we have to explicitly release
+ * ->shost_dev.
+ */
+ put_device(&shost->shost_dev);
device_del(&shost->shost_gendev);
out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
ida_simple_remove(&host_index_ida, shost->host_no);
- if (parent)
+ if (shost->shost_state != SHOST_CREATED)
put_device(parent);
kfree(shost);
}
@@ -388,8 +390,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
mutex_init(&shost->scan_mutex);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto fail_kfree;
+ if (index < 0) {
+ kfree(shost);
+ return NULL;
+ }
shost->host_no = index;
shost->dma_channel = 0xff;
@@ -481,7 +485,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_index_remove;
+ goto fail;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -490,17 +494,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost,
"failed to create tmf workq\n");
- goto fail_kthread;
+ goto fail;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
+ fail:
+ /*
+ * Host state is still SHOST_CREATED and that is enough to release
+ * ->shost_gendev. scsi_host_dev_release() will free
+ * dev_name(&shost->shost_dev).
+ */
+ put_device(&shost->shost_gendev);
- fail_kthread:
- kthread_stop(shost->ehandler);
- fail_index_remove:
- ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
- kfree(shost);
return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 19cf418928fa..e3d03d744713 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
static void sas_resume_port(struct asd_sas_phy *phy)
{
- struct domain_device *dev;
+ struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
- list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 573c8599d71c..fc3682f15f50 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20589,10 +20589,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abtswqe = &abtsiocb->wqe;
memset(abtswqe, 0, sizeof(*abtswqe));
- if (lpfc_is_link_up(phba))
+ if (!lpfc_is_link_up(phba))
bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
- else
- bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
abtswqe->abort_cmd.rsrvd5 = 0;
abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index ecd06d2d7e81..71aa6af08340 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3765,11 +3765,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PHY_START_STATUS:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
status);
- if (status == 0) {
+ if (status == 0)
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
- complete(phy->enable_completion);
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
break;
case HW_EVENT_SAS_PHY_UP:
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 390c33df0357..af09bd282cb9 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1151,8 +1151,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_shost;
}
list_add_tail(&pm8001_ha->list, &hba_list);
- scsi_scan_host(pm8001_ha->shost);
pm8001_ha->flags = PM8001F_RUN_TIME;
+ scsi_scan_host(pm8001_ha->shost);
return 0;
err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d28af413b93a..335cf37e6cb9 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -264,12 +264,17 @@ void pm8001_scan_start(struct Scsi_Host *shost)
int i;
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sha->lldd_ha;
/* SAS_RE_INITIALIZATION not available in SPCv/ve */
if (pm8001_ha->chip_id == chip_8001)
PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
- for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+ for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
+ pm8001_ha->phy[i].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ wait_for_completion(&completion);
+ msleep(300);
+ }
}
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 4e980830f9f5..700530e969ac 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3487,13 +3487,13 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, INIT,
"phy start resp status:0x%x, phyid:0x%x\n",
status, phy_id);
- if (status == 0) {
+ if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL) {
- complete(phy->enable_completion);
- phy->enable_completion = NULL;
- }
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
return 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 69f7784233f9..b92570a7c309 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
- fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+ if (lport->host && lport->host->shost_data)
+ fc_host_supported_speeds(lport->host) =
+ lport->link_supported_speeds;
}
static void qedf_bw_update(void *dev)
@@ -1825,22 +1827,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
"WWPN (0x%s) already exists.\n", buf);
- goto err1;
+ return rc;
}
if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
"because link is not up.\n");
- rc = -EIO;
- goto err1;
+ return -EIO;
}
vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
if (!vn_port) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
"for vport.\n");
- rc = -ENOMEM;
- goto err1;
+ return -ENOMEM;
}
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1864,7 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
if (rc) {
QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
"for lport stats.\n");
- goto err2;
+ goto err;
}
fc_set_wwnn(vn_port, vport->node_name);
@@ -1882,7 +1882,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
if (rc) {
QEDF_WARN(&base_qedf->dbg_ctx,
"Error adding Scsi_Host rc=0x%x.\n", rc);
- goto err2;
+ goto err;
}
/* Set default dev_loss_tmo based on module parameter */
@@ -1923,9 +1923,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
-err2:
+ return 0;
+
+err:
scsi_host_put(vn_port->host);
-err1:
return rc;
}
@@ -1966,8 +1967,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
fc_lport_free_stats(vn_port);
/* Release Scsi_Host */
- if (vn_port->host)
- scsi_host_put(vn_port->host);
+ scsi_host_put(vn_port->host);
out:
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0677295957bc..615e44af1ca6 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
return ret;
}
- if (qla82xx_flash_set_write_enable(ha))
+ ret = qla82xx_flash_set_write_enable(ha);
+ if (ret < 0)
goto done_write;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b2008fb1dd38..12a6848ade43 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1563,10 +1563,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
return;
}
+ mutex_lock(&tgt->ha->optrom_mutex);
mutex_lock(&vha->vha_tgt.tgt_mutex);
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&tgt->ha->optrom_mutex);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index d92cec12454c..d33355ab6e14 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -184,6 +184,7 @@ static struct {
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
{"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cb3c37d1e009..a2c3d9ad9ee4 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1387,6 +1387,22 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
}
}
+static bool sd_need_revalidate(struct block_device *bdev,
+ struct scsi_disk *sdkp)
+{
+ if (sdkp->device->removable || sdkp->write_prot) {
+ if (bdev_check_media_change(bdev))
+ return true;
+ }
+
+ /*
+ * Force a full rescan after ioctl(BLKRRPART). While the disk state has
+ * nothing to do with partitions, BLKRRPART is used to force a full
+ * revalidate after things like a format for historical reasons.
+ */
+ return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+}
+
/**
* sd_open - open a scsi disk device
* @bdev: Block device of the scsi disk to open
@@ -1423,10 +1439,8 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
if (!scsi_block_when_processing_errors(sdev))
goto error_out;
- if (sdev->removable || sdkp->write_prot) {
- if (bdev_check_media_change(bdev))
- sd_revalidate_disk(bdev->bd_disk);
- }
+ if (sd_need_revalidate(bdev, sdkp))
+ sd_revalidate_disk(bdev->bd_disk);
/*
* If the drive is empty, just let the open fail.
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e4633b84c556..7815ed642d43 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -220,6 +220,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
return DISK_EVENT_EJECT_REQUEST;
else if (med->media_event_code == 2)
return DISK_EVENT_MEDIA_CHANGE;
+ else if (med->media_event_code == 3)
+ return DISK_EVENT_EJECT_REQUEST;
return 0;
}
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 0aa58131e791..d0626773eb38 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- host->rst = devm_reset_control_get(dev, "rst");
+ host->rst = devm_reset_control_get(dev, "rst");
if (IS_ERR(host->rst)) {
dev_err(dev, "%s: failed to get reset control\n", __func__);
- return PTR_ERR(host->rst);
+ err = PTR_ERR(host->rst);
+ goto error;
}
ufs_hisi_set_pm_lvl(hba);
err = ufs_hisi_get_resource(host);
- if (err) {
- ufshcd_set_variant(hba, NULL);
- return err;
- }
+ if (err)
+ goto error;
return 0;
+
+error:
+ ufshcd_set_variant(hba, NULL);
+ return err;
}
static int ufs_hi3660_init(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index a981f261b304..0a84ec9e7cea 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -603,11 +603,23 @@ static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
if (!ret) {
- if (ver >= UFS_UNIPRO_VER_1_8)
+ if (ver >= UFS_UNIPRO_VER_1_8) {
host->hw_ver.major = 3;
+ /*
+ * Fix HCI version for some platforms with
+ * incorrect version
+ */
+ if (hba->ufs_version < ufshci_version(3, 0))
+ hba->ufs_version = ufshci_version(3, 0);
+ }
}
}
+static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
+{
+ return hba->ufs_version;
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -922,6 +934,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ struct arm_smccc_res res;
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
@@ -941,6 +954,9 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto fail;
}
+ if (ufshcd_is_link_off(hba))
+ ufs_mtk_device_reset_ctrl(0, res);
+
return 0;
fail:
/*
@@ -1044,6 +1060,7 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
+ .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
.setup_clocks = ufs_mtk_setup_clocks,
.hce_enable_notify = ufs_mtk_hce_enable_notify,
.link_startup_notify = ufs_mtk_link_startup_notify,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3eb54937f1d8..72fd41bfbd54 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2842,7 +2842,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
- * @timeout: time in seconds
+ * @timeout: timeout in milliseconds
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@@ -2872,6 +2872,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
}
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ /* Set the timeout such that the SCSI error handler is not activated. */
+ req->timeout = msecs_to_jiffies(2 * timeout);
+ blk_mq_start_request(req);
init_completion(&wait);
lrbp = &hba->lrb[tag];
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 8a79605d9652..b9969fce6b4d 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -585,7 +585,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
case BTSTAT_SUCCESS:
case BTSTAT_LINKED_COMMAND_COMPLETED:
case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
- /* If everything went fine, let's move on.. */
+ /*
+ * Commands like INQUIRY may transfer less data than
+ * requested by the initiator via bufflen. Set residual
+ * count to make upper layer aware of the actual amount
+ * of data returned.
+ */
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_OK << 16);
break;
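
The vmw_pvscsi hunk above makes the success path report short transfers instead of implicitly claiming the whole buffer was filled. As a toy illustration of the residual arithmetic (not driver code): for a 96-byte INQUIRY buffer where the device returns only the 36-byte standard inquiry data, the value recorded via scsi_set_resid() is 60.

#include <assert.h>

/* Mirrors the scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen) call above */
static unsigned int residual(unsigned int bufflen, unsigned int data_len)
{
	return bufflen - data_len;
}

int main(void)
{
	assert(residual(96, 36) == 60);
	return 0;
}
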
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index e1957476a006..6dd190270123 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -626,10 +626,8 @@ static int meson_msr_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base)) {
- dev_err(&pdev->dev, "io resource mapping failed\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_clk_msr_regmap_config);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 2827085a323b..0ef79d60e88e 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1150,8 +1150,16 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
- if (ret)
- return ret;
+ if (ret) {
+ u32 version;
+
+ ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version);
+
+ if (version <= 0x01030000)
+ memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+ else
+ return ret;
+ }
memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8b161ec4943b..e71a4c514f7b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -65,7 +65,7 @@ config SPI_ALTERA
This is the driver for the Altera SPI Controller.
config SPI_ALTERA_CORE
- tristate "Altera SPI Controller core code"
+ tristate "Altera SPI Controller core code" if COMPILE_TEST
select REGMAP
help
"The core code for the Altera SPI Controller"
@@ -806,6 +806,7 @@ config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
depends on OF
+ depends on SPI_MEM
help
This enables support for the Quad SPI controller in master mode.
This driver does not support generic SPI. The implementation only
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index 3e32e4fe5895..39a3e1a032e0 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -148,10 +148,8 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
base = devm_ioremap_resource(dev, &dfl_dev->mmio_res);
- if (IS_ERR(base)) {
- dev_err(dev, "%s get mem resource fail!\n", __func__);
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
config_spi_master(base, master);
dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 98ace748cd98..d1e287d2d9cd 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -19,7 +19,6 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/platform_data/spi-ath79.h>
#define DRV_NAME "ath79-spi"
@@ -138,7 +137,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ath79_spi *sp;
- struct ath79_spi_platform_data *pdata;
unsigned long rate;
int ret;
@@ -152,15 +150,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
- pdata = dev_get_platdata(&pdev->dev);
-
master->use_gpio_descriptors = true;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->flags = SPI_MASTER_GPIO_SS;
- if (pdata) {
- master->bus_num = pdata->bus_num;
- master->num_chipselect = pdata->num_chipselect;
- }
+ master->num_chipselect = 3;
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 7cd5fe00dfc1..2ef74885ffa2 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -700,7 +700,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
struct spi_transfer *xfer,
u32 *plen)
- __must_hold(&as->lock)
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct dma_chan *rxchan = master->dma_rx;
@@ -716,8 +715,6 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
if (!rxchan || !txchan)
return -ENODEV;
- /* release lock for DMA operations */
- atmel_spi_unlock(as);
*plen = xfer->len;
@@ -786,15 +783,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
rxchan->device->device_issue_pending(rxchan);
txchan->device->device_issue_pending(txchan);
- /* take back lock */
- atmel_spi_lock(as);
return 0;
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
atmel_spi_stop_dma(master);
err_exit:
- atmel_spi_lock(as);
return -ENOMEM;
}
@@ -863,7 +857,6 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
* lock is held, spi irq is blocked
*/
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
- struct spi_message *msg,
struct spi_transfer *xfer)
{
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -879,12 +872,12 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RPR, rx_dma);
spi_writel(as, TPR, tx_dma);
- if (msg->spi->bits_per_word > 8)
+ if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RCR, len);
spi_writel(as, TCR, len);
- dev_dbg(&msg->spi->dev,
+ dev_dbg(&master->dev,
" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -898,12 +891,12 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RNPR, rx_dma);
spi_writel(as, TNPR, tx_dma);
- if (msg->spi->bits_per_word > 8)
+ if (xfer->bits_per_word > 8)
len >>= 1;
spi_writel(as, RNCR, len);
spi_writel(as, TNCR, len);
- dev_dbg(&msg->spi->dev,
+ dev_dbg(&master->dev,
" next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -1054,8 +1047,6 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
/* Interrupt
*
- * No need for locking in this Interrupt handler: done_status is the
- * only information modified.
*/
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
@@ -1273,12 +1264,28 @@ static int atmel_spi_setup(struct spi_device *spi)
return 0;
}
+static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct atmel_spi *as = spi_master_get_devdata(spi->master);
+ /* the core doesn't really pass us enable/disable, but CS HIGH vs CS LOW
+ * since we already have routines for activate/deactivate translate
+ * high/low to active/inactive
+ */
+ enable = (!!(spi->mode & SPI_CS_HIGH) == enable);
+
+ if (enable) {
+ cs_activate(as, spi);
+ } else {
+ cs_deactivate(as, spi);
+ }
+
+}
+
static int atmel_spi_one_transfer(struct spi_master *master,
- struct spi_message *msg,
+ struct spi_device *spi,
struct spi_transfer *xfer)
{
struct atmel_spi *as;
- struct spi_device *spi = msg->spi;
u8 bits;
u32 len;
struct atmel_spi_device *asd;
@@ -1288,11 +1295,6 @@ static int atmel_spi_one_transfer(struct spi_master *master,
as = spi_master_get_devdata(master);
- if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
- dev_dbg(&spi->dev, "missing rx or tx buf\n");
- return -EINVAL;
- }
-
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
if (bits != xfer->bits_per_word - 8) {
@@ -1305,13 +1307,13 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
- if ((!msg->is_dma_mapped)
+ if ((!master->cur_msg_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
- atmel_spi_set_xfer_speed(as, msg->spi, xfer);
+ atmel_spi_set_xfer_speed(as, spi, xfer);
as->done_status = 0;
as->current_transfer = xfer;
@@ -1320,7 +1322,9 @@ static int atmel_spi_one_transfer(struct spi_master *master,
reinit_completion(&as->xfer_completion);
if (as->use_pdc) {
- atmel_spi_pdc_next_xfer(master, msg, xfer);
+ atmel_spi_lock(as);
+ atmel_spi_pdc_next_xfer(master, xfer);
+ atmel_spi_unlock(as);
} else if (atmel_spi_use_dma(as, xfer)) {
len = as->current_remaining_bytes;
ret = atmel_spi_next_xfer_dma_submit(master,
@@ -1328,21 +1332,21 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (ret) {
dev_err(&spi->dev,
"unable to use DMA, fallback to PIO\n");
- atmel_spi_next_xfer_pio(master, xfer);
+ as->done_status = ret;
+ break;
} else {
as->current_remaining_bytes -= len;
if (as->current_remaining_bytes < 0)
as->current_remaining_bytes = 0;
}
} else {
+ atmel_spi_lock(as);
atmel_spi_next_xfer_pio(master, xfer);
+ atmel_spi_unlock(as);
}
- /* interrupts are disabled, so free the lock for schedule */
- atmel_spi_unlock(as);
dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
SPI_DMA_TIMEOUT);
- atmel_spi_lock(as);
if (WARN_ON(dma_timeout == 0)) {
dev_err(&spi->dev, "spi transfer timeout\n");
as->done_status = -EIO;
@@ -1381,90 +1385,16 @@ static int atmel_spi_one_transfer(struct spi_master *master,
} else if (atmel_spi_use_dma(as, xfer)) {
atmel_spi_stop_dma(master);
}
-
- if (!msg->is_dma_mapped
- && as->use_pdc)
- atmel_spi_dma_unmap_xfer(master, xfer);
-
- return 0;
-
- } else {
- /* only update length if no error */
- msg->actual_length += xfer->len;
}
- if (!msg->is_dma_mapped
+ if (!master->cur_msg_mapped
&& as->use_pdc)
atmel_spi_dma_unmap_xfer(master, xfer);
- spi_transfer_delay_exec(xfer);
-
- if (xfer->cs_change) {
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers)) {
- as->keep_cs = true;
- } else {
- cs_deactivate(as, msg->spi);
- udelay(10);
- cs_activate(as, msg->spi);
- }
- }
-
- return 0;
-}
-
-static int atmel_spi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct atmel_spi *as;
- struct spi_transfer *xfer;
- struct spi_device *spi = msg->spi;
- int ret = 0;
-
- as = spi_master_get_devdata(master);
-
- dev_dbg(&spi->dev, "new message %p submitted for %s\n",
- msg, dev_name(&spi->dev));
-
- atmel_spi_lock(as);
- cs_activate(as, spi);
-
- as->keep_cs = false;
-
- msg->status = 0;
- msg->actual_length = 0;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- trace_spi_transfer_start(msg, xfer);
-
- ret = atmel_spi_one_transfer(master, msg, xfer);
- if (ret)
- goto msg_done;
-
- trace_spi_transfer_stop(msg, xfer);
- }
-
if (as->use_pdc)
atmel_spi_disable_pdc_transfer(as);
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- dev_dbg(&spi->dev,
- " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
- xfer, xfer->len,
- xfer->tx_buf, &xfer->tx_dma,
- xfer->rx_buf, &xfer->rx_dma);
- }
-
-msg_done:
- if (!as->keep_cs)
- cs_deactivate(as, msg->spi);
-
- atmel_spi_unlock(as);
-
- msg->status = as->done_status;
- spi_finalize_current_message(spi->master);
-
- return ret;
+ return as->done_status;
}
static void atmel_spi_cleanup(struct spi_device *spi)
@@ -1554,7 +1484,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
master->setup = atmel_spi_setup;
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
- master->transfer_one_message = atmel_spi_transfer_one_message;
+ master->transfer_one = atmel_spi_one_transfer;
+ master->set_cs = atmel_spi_set_cs;
master->cleanup = atmel_spi_cleanup;
master->auto_runtime_pm = true;
master->max_dma_len = SPI_MAX_DMA_XFER;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 8965fe61c8b4..5f8771fe1a31 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -68,7 +68,6 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -96,8 +95,6 @@ MODULE_PARM_DESC(polling_limit_us,
* @rx_prologue: bytes received without DMA if first RX sglist entry's
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
- * @prepare_cs: precalculated CS register value for ->prepare_message()
- * (uses slave-specific clock polarity and phase settings)
* @debugfs_dir: the debugfs directory - neede to remove debugfs when
* unloading the module
* @count_transfer_polling: count of how often polling mode is used
@@ -107,7 +104,7 @@ MODULE_PARM_DESC(polling_limit_us,
* These are counted as well in @count_transfer_polling and
* @count_transfer_irq
* @count_transfer_dma: count how often dma mode is used
- * @chip_select: SPI slave currently selected
+ * @slv: SPI slave currently selected
* (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
* @tx_dma_active: whether a TX DMA descriptor is in progress
* @rx_dma_active: whether a RX DMA descriptor is in progress
@@ -115,11 +112,6 @@ MODULE_PARM_DESC(polling_limit_us,
* @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
* (cyclically copies from zero page to TX FIFO)
* @fill_tx_addr: bus address of zero page
- * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
- * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
- * @clear_rx_addr: bus address of @clear_rx_cs
- * @clear_rx_cs: precalculated CS register value to clear RX FIFO
- * (uses slave-specific clock polarity and phase settings)
*/
struct bcm2835_spi {
void __iomem *regs;
@@ -134,7 +126,6 @@ struct bcm2835_spi {
int tx_prologue;
int rx_prologue;
unsigned int tx_spillover;
- u32 prepare_cs[BCM2835_SPI_NUM_CS];
struct dentry *debugfs_dir;
u64 count_transfer_polling;
@@ -142,14 +133,28 @@ struct bcm2835_spi {
u64 count_transfer_irq_after_polling;
u64 count_transfer_dma;
- u8 chip_select;
+ struct bcm2835_spidev *slv;
unsigned int tx_dma_active;
unsigned int rx_dma_active;
struct dma_async_tx_descriptor *fill_tx_desc;
dma_addr_t fill_tx_addr;
- struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
+};
+
+/**
+ * struct bcm2835_spidev - BCM2835 SPI slave
+ * @prepare_cs: precalculated CS register value for ->prepare_message()
+ * (uses slave-specific clock polarity and phase settings)
+ * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
+ * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
+ * @clear_rx_addr: bus address of @clear_rx_cs
+ * @clear_rx_cs: precalculated CS register value to clear RX FIFO
+ * (uses slave-specific clock polarity and phase settings)
+ */
+struct bcm2835_spidev {
+ u32 prepare_cs;
+ struct dma_async_tx_descriptor *clear_rx_desc;
dma_addr_t clear_rx_addr;
- u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
+ u32 clear_rx_cs ____cacheline_aligned;
};
#if defined(CONFIG_DEBUG_FS)
@@ -624,8 +629,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
/* busy-wait for TX FIFO to empty */
while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
- bcm2835_wr(bs, BCM2835_SPI_CS,
- bs->clear_rx_cs[bs->chip_select]);
+ bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);
bs->tx_dma_active = false;
smp_wmb();
@@ -646,18 +650,18 @@ static void bcm2835_spi_dma_tx_done(void *data)
/**
* bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
* @ctlr: SPI master controller
- * @spi: SPI slave
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
+ * @slv: BCM2835 SPI slave
* @is_tx: whether to submit DMA descriptor for TX or RX sglist
*
* Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
* Return 0 on success or a negative error number.
*/
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
- struct spi_device *spi,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
+ struct bcm2835_spidev *slv,
bool is_tx)
{
struct dma_chan *chan;
@@ -697,7 +701,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
} else if (!tfr->rx_buf) {
desc->callback = bcm2835_spi_dma_tx_done;
desc->callback_param = ctlr;
- bs->chip_select = spi->chip_select;
+ bs->slv = slv;
}
/* submit it to DMA-engine */
@@ -709,8 +713,8 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
/**
* bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
* @ctlr: SPI master controller
- * @spi: SPI slave
* @tfr: SPI transfer
+ * @slv: BCM2835 SPI slave
* @cs: CS register
*
* For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
@@ -754,8 +758,8 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
* performed at the end of an RX-only transfer.
*/
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
- struct spi_device *spi,
struct spi_transfer *tfr,
+ struct bcm2835_spidev *slv,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
@@ -773,7 +777,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
/* setup tx-DMA */
if (bs->tx_buf) {
- ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
} else {
cookie = dmaengine_submit(bs->fill_tx_desc);
ret = dma_submit_error(cookie);
@@ -799,9 +803,9 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
* this saves 10us or more.
*/
if (bs->rx_buf) {
- ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
} else {
- cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
+ cookie = dmaengine_submit(slv->clear_rx_desc);
ret = dma_submit_error(cookie);
}
if (ret) {
@@ -850,8 +854,6 @@ static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
static void bcm2835_dma_release(struct spi_controller *ctlr,
struct bcm2835_spi *bs)
{
- int i;
-
if (ctlr->dma_tx) {
dmaengine_terminate_sync(ctlr->dma_tx);
@@ -870,17 +872,6 @@ static void bcm2835_dma_release(struct spi_controller *ctlr,
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
-
- for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
- if (bs->clear_rx_desc[i])
- dmaengine_desc_free(bs->clear_rx_desc[i]);
-
- if (bs->clear_rx_addr)
- dma_unmap_single(ctlr->dma_rx->device->dev,
- bs->clear_rx_addr,
- sizeof(bs->clear_rx_cs),
- DMA_TO_DEVICE);
-
dma_release_channel(ctlr->dma_rx);
ctlr->dma_rx = NULL;
}
@@ -892,7 +883,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
struct dma_slave_config slave_config;
const __be32 *addr;
dma_addr_t dma_reg_base;
- int ret, i;
+ int ret;
/* base address in dma-space */
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
@@ -972,35 +963,6 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
if (ret)
goto err_config;
- bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
- bs->clear_rx_cs,
- sizeof(bs->clear_rx_cs),
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
- dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
- bs->clear_rx_addr = 0;
- ret = -ENOMEM;
- goto err_release;
- }
-
- for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
- bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
- bs->clear_rx_addr + i * sizeof(u32),
- sizeof(u32), 0,
- DMA_MEM_TO_DEV, 0);
- if (!bs->clear_rx_desc[i]) {
- dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
- ret = -ENOMEM;
- goto err_release;
- }
-
- ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
- if (ret) {
- dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
- goto err_release;
- }
- }
-
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
@@ -1082,9 +1044,10 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
unsigned long spi_hz, clk_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
- u32 cs = bs->prepare_cs[spi->chip_select];
+ u32 cs = slv->prepare_cs;
/* set clock */
spi_hz = tfr->speed_hz;
@@ -1133,7 +1096,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
* this 1 idle clock cycle pattern but runs the spi clock without gaps
*/
if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
- return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
+ return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);
/* run in interrupt-mode */
return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
@@ -1144,6 +1107,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
int ret;
if (ctlr->can_dma) {
@@ -1162,7 +1126,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
* Set up clock polarity before spi_transfer_one_message() asserts
* chip select to avoid a gratuitous clock signal edge.
*/
- bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);
+ bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);
return 0;
}
@@ -1188,13 +1152,83 @@ static int chip_match_name(struct gpio_chip *chip, void *data)
return !strcmp(chip->label, data);
}
+static void bcm2835_spi_cleanup(struct spi_device *spi)
+{
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct spi_controller *ctlr = spi->controller;
+
+ if (slv->clear_rx_desc)
+ dmaengine_desc_free(slv->clear_rx_desc);
+
+ if (slv->clear_rx_addr)
+ dma_unmap_single(ctlr->dma_rx->device->dev,
+ slv->clear_rx_addr,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+
+ kfree(slv);
+}
+
+static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct bcm2835_spi *bs,
+ struct bcm2835_spidev *slv)
+{
+ int ret;
+
+ if (!ctlr->dma_rx)
+ return 0;
+
+ slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
+ &slv->clear_rx_cs,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
+ dev_err(&spi->dev, "cannot map clear_rx_cs\n");
+ slv->clear_rx_addr = 0;
+ return -ENOMEM;
+ }
+
+ slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
+ slv->clear_rx_addr,
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!slv->clear_rx_desc) {
+ dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
+ return -ENOMEM;
+ }
+
+ ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
+ if (ret) {
+ dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int bcm2835_spi_setup(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spidev *slv = spi_get_ctldata(spi);
struct gpio_chip *chip;
+ int ret;
u32 cs;
+ if (!slv) {
+ slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
+ GFP_KERNEL);
+ if (!slv)
+ return -ENOMEM;
+
+ spi_set_ctldata(spi, slv);
+
+ ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
+ if (ret)
+ goto err_cleanup;
+ }
+
/*
* Precalculate SPI slave's CS register value for ->prepare_message():
* The driver always uses software-controlled GPIO chip select, hence
@@ -1206,20 +1240,19 @@ static int bcm2835_spi_setup(struct spi_device *spi)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
- bs->prepare_cs[spi->chip_select] = cs;
+ slv->prepare_cs = cs;
/*
* Precalculate SPI slave's CS register value to clear RX FIFO
* in case of a TX-only DMA transfer.
*/
if (ctlr->dma_rx) {
- bs->clear_rx_cs[spi->chip_select] = cs |
- BCM2835_SPI_CS_TA |
- BCM2835_SPI_CS_DMAEN |
- BCM2835_SPI_CS_CLEAR_RX;
+ slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
+ BCM2835_SPI_CS_DMAEN |
+ BCM2835_SPI_CS_CLEAR_RX;
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
- bs->clear_rx_addr,
- sizeof(bs->clear_rx_cs),
+ slv->clear_rx_addr,
+ sizeof(u32),
DMA_TO_DEVICE);
}
@@ -1241,7 +1274,8 @@ static int bcm2835_spi_setup(struct spi_device *spi)
*/
dev_err(&spi->dev,
"setup: only two native chip-selects are supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_cleanup;
}
/*
@@ -1262,14 +1296,20 @@ static int bcm2835_spi_setup(struct spi_device *spi)
DRV_NAME,
GPIO_LOOKUP_FLAGS_DEFAULT,
GPIOD_OUT_LOW);
- if (IS_ERR(spi->cs_gpiod))
- return PTR_ERR(spi->cs_gpiod);
+ if (IS_ERR(spi->cs_gpiod)) {
+ ret = PTR_ERR(spi->cs_gpiod);
+ goto err_cleanup;
+ }
/* and set up the "mode" and level */
dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
spi->chip_select);
return 0;
+
+err_cleanup:
+ bcm2835_spi_cleanup(spi);
+ return ret;
}
static int bcm2835_spi_probe(struct platform_device *pdev)
@@ -1278,8 +1318,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct bcm2835_spi *bs;
int err;
- ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
- dma_get_cache_alignment()));
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!ctlr)
return -ENOMEM;
@@ -1288,8 +1327,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->use_gpio_descriptors = true;
ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
+ ctlr->num_chipselect = 3;
ctlr->setup = bcm2835_spi_setup;
+ ctlr->cleanup = bcm2835_spi_cleanup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
ctlr->prepare_message = bcm2835_spi_prepare_message;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 75589ac6e95f..37eab100a7d8 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -384,7 +384,7 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
bs->pending = 0;
/* Calculate the estimated time in us the transfer runs. Note that
- * there are are 2 idle clocks cycles after each chunk getting
+ * there are 2 idle clocks cycles after each chunk getting
* transferred - in our case the chunk size is 3 bytes, so we
* approximate this by 9 cycles/byte. This is used to find the number
* of Hz per byte per polling limit. E.g., we can transfer 1 byte in
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 6a6af85aebfd..27d0087f8688 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -184,6 +184,8 @@ int spi_bitbang_setup(struct spi_device *spi)
{
struct spi_bitbang_cs *cs = spi->controller_state;
struct spi_bitbang *bitbang;
+ bool initial_setup = false;
+ int retval;
bitbang = spi_master_get_devdata(spi->master);
@@ -192,22 +194,30 @@ int spi_bitbang_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
+ initial_setup = true;
}
/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
- if (!cs->txrx_word)
- return -EINVAL;
+ if (!cs->txrx_word) {
+ retval = -EINVAL;
+ goto err_free;
+ }
if (bitbang->setup_transfer) {
- int retval = bitbang->setup_transfer(spi, NULL);
+ retval = bitbang->setup_transfer(spi, NULL);
if (retval < 0)
- return retval;
+ goto err_free;
}
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
return 0;
+
+err_free:
+ if (initial_setup)
+ kfree(cs);
+ return retval;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 17c06039a74d..3379720cfcb8 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -56,7 +56,7 @@ struct dw_spi_mscc {
/*
* The Designware SPI controller (referred to as master in the documentation)
* automatically deasserts chip select when the tx fifo is empty. The chip
- * selects then needs to be either driven as GPIOs or, for the first 4 using the
+ * selects then needs to be either driven as GPIOs or, for the first 4 using
* the SPI boot controller registers. the final chip select is an OR gate
* between the Designware SPI controller and the SPI boot controller.
*/
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 028736687488..fb45e6af6638 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1375,11 +1375,13 @@ poll_mode:
ret = spi_register_controller(ctlr);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
- goto out_free_irq;
+ goto out_release_dma;
}
return ret;
+out_release_dma:
+ dspi_release_dma(dspi);
out_free_irq:
if (dspi->irq)
free_irq(dspi->irq, dspi);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d0e5aa18b7ba..bdf94cc7be1a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg __iomem *reg_base;
+ bool initial_setup = false;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
if (!cs)
return -ENOMEM;
spi_set_ctldata(spi, cs);
+ initial_setup = true;
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
retval = fsl_spi_setup_transfer(spi, NULL);
if (retval < 0) {
cs->hw_mode = hw_mode; /* Restore settings */
+ if (initial_setup)
+ kfree(cs);
return retval;
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 3d0d8ddd5772..b3861fb88711 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -639,8 +639,8 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
complete(&mas->abort_done);
/*
- * It's safe or a good idea to Ack all of our our interrupts at the
- * end of the function. Specifically:
+ * It's safe or a good idea to Ack all of our interrupts at the end
+ * of the function. Specifically:
* - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
* clearing Acks. Clearing at the end relies on nobody else having
* started a new transfer yet or else we could be clearing _their_
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 3f986ba1c328..58b823a16fc4 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -126,6 +127,7 @@ struct hisi_spi {
void __iomem *regs;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
+ u16 bus_num;
/* Current message transfer state info */
const void *tx;
@@ -133,8 +135,49 @@ struct hisi_spi {
void *rx;
unsigned int rx_len;
u8 n_bytes; /* current is a 1/2/4 bytes op */
+
+ struct dentry *debugfs;
+ struct debugfs_regset32 regset;
+};
+
+#define HISI_SPI_DBGFS_REG(_name, _off) \
+{ \
+ .name = _name, \
+ .offset = _off, \
+}
+
+static const struct debugfs_reg32 hisi_spi_regs[] = {
+ HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR),
+ HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR),
+ HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
+ HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
+ HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
+ HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
+ HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
+ HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
+ HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
+ HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
+ HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR),
+ HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION),
};
+static int hisi_spi_debugfs_init(struct hisi_spi *hs)
+{
+ char name[32];
+
+ snprintf(name, 32, "hisi_spi%d", hs->bus_num);
+ hs->debugfs = debugfs_create_dir(name, NULL);
+ if (!hs->debugfs)
+ return -ENOMEM;
+
+ hs->regset.regs = hisi_spi_regs;
+ hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs);
+ hs->regset.base = hs->regs;
+ debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset);
+
+ return 0;
+}
+
static u32 hisi_spi_busy(struct hisi_spi *hs)
{
return readl(hs->regs + HISI_SPI_SR) & SR_BUSY;
@@ -424,6 +467,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
hs = spi_controller_get_devdata(master);
hs->dev = dev;
hs->irq = irq;
+ hs->bus_num = pdev->id;
hs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hs->regs))
@@ -446,7 +490,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->bus_num = pdev->id;
+ master->bus_num = hs->bus_num;
master->setup = hisi_spi_setup;
master->cleanup = hisi_spi_cleanup;
master->transfer_one = hisi_spi_transfer_one;
@@ -462,6 +506,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
return ret;
}
+ if (hisi_spi_debugfs_init(hs))
+ dev_info(dev, "failed to create debugfs dir\n");
+
ret = spi_register_controller(master);
if (ret) {
dev_err(dev, "failed to register spi master, ret=%d\n", ret);
@@ -478,7 +525,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
static int hisi_spi_remove(struct platform_device *pdev)
{
struct spi_controller *master = platform_get_drvdata(pdev);
+ struct hisi_spi *hs = spi_controller_get_devdata(master);
+ debugfs_remove_recursive(hs->debugfs);
spi_unregister_controller(master);
return 0;
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index f914b8d2043e..ead0507c63be 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -202,7 +202,7 @@ static void spi_lm70llp_attach(struct parport *p)
* the lm70 driver could verify it, reading the manf ID.
*/
- master = spi_alloc_master(p->physport->dev, sizeof *pp);
+ master = spi_alloc_master(p->physport->dev, sizeof(*pp));
if (!master) {
status = -ENOMEM;
goto out_fail;
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index f1cf2232f0b5..4d4f77a186a9 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -875,7 +875,7 @@ static int spi_test_run_iter(struct spi_device *spi,
test.transfers[i].len = len;
if (test.transfers[i].tx_buf)
test.transfers[i].tx_buf += tx_off;
- if (test.transfers[i].tx_buf)
+ if (test.transfers[i].rx_buf)
test.transfers[i].rx_buf += rx_off;
}
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 1513553e4080..37f4443ce9a0 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -6,6 +6,7 @@
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#include <linux/dmaengine.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -743,6 +744,91 @@ static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
+static int spi_mem_read_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 *status)
+{
+ const u8 *bytes = (u8 *)op->data.buf.in;
+ int ret;
+
+ ret = spi_mem_exec_op(mem, op);
+ if (ret)
+ return ret;
+
+ if (op->data.nbytes > 1)
+ *status = ((u16)bytes[0] << 8) | bytes[1];
+ else
+ *status = bytes[0];
+
+ return 0;
+}
+
+/**
+ * spi_mem_poll_status() - Poll memory device status
+ * @mem: SPI memory device
+ * @op: the memory operation to execute
+ * @mask: status bitmask to check
+ * @match: (status & mask) expected value
+ * @initial_delay_us: delay in us before starting to poll
+ * @polling_delay_us: time to sleep between reads in us
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This function polls a status register and returns when
+ * (status & mask) == match or when the timeout has expired.
+ *
+ * Return: 0 in case of success, -ETIMEDOUT in case of error,
+ * -EOPNOTSUPP if not supported.
+ */
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ int ret = -EOPNOTSUPP;
+ int read_status_ret;
+ u16 status;
+
+ if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
+ op->data.dir != SPI_MEM_DATA_IN)
+ return -EINVAL;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
+ ret = spi_mem_access_start(mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
+ initial_delay_us, polling_delay_us,
+ timeout_ms);
+
+ spi_mem_access_end(mem);
+ }
+
+ if (ret == -EOPNOTSUPP) {
+ if (!spi_mem_supports_op(mem, op))
+ return ret;
+
+ if (initial_delay_us < 10)
+ udelay(initial_delay_us);
+ else
+ usleep_range((initial_delay_us >> 2) + 1,
+ initial_delay_us);
+
+ ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
+ (read_status_ret || ((status) & mask) == match),
+ polling_delay_us, timeout_ms * 1000, false, mem,
+ op, &status);
+ if (read_status_ret)
+ return read_status_ret;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_poll_status);
+
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
@@ -810,7 +896,7 @@ int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
/**
- * spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
+ * spi_mem_driver_unregister() - Unregister a SPI memory driver
* @memdrv: the SPI memory driver to unregister
*
* Unregisters a SPI memory driver.
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index ecba6b4a5d85..b2c4621db34d 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -725,7 +725,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
ret = clk_prepare_enable(spicc->pclk);
if (ret) {
dev_err(&pdev->dev, "pclk clock enable failed\n");
- goto out_master;
+ goto out_core_clk;
}
device_reset_optional(&pdev->dev);
@@ -752,7 +752,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
ret = meson_spicc_clk_init(spicc);
if (ret) {
dev_err(&pdev->dev, "clock registration failed\n");
- goto out_master;
+ goto out_clk;
}
ret = devm_spi_register_master(&pdev->dev, master);
@@ -764,9 +764,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
return 0;
out_clk:
- clk_disable_unprepare(spicc->core);
clk_disable_unprepare(spicc->pclk);
+out_core_clk:
+ clk_disable_unprepare(spicc->core);
+
out_master:
spi_master_put(master);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index ea1b07953d38..78a9bca8cc68 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -369,7 +369,7 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
@@ -491,7 +491,7 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
void *tempp;
struct clk *clk;
- master = spi_alloc_master(dev, sizeof *mps);
+ master = spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 17935e71b02f..21ef5d481faf 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -265,7 +265,7 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
return -EINVAL;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -365,7 +365,7 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
struct spi_master *master;
int ret;
- master = spi_alloc_master(dev, sizeof *mps);
+ master = spi_alloc_master(dev, sizeof(*mps));
if (master == NULL)
return -ENOMEM;
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 124cba7213f1..51041526546d 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -415,7 +415,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
}
dev_dbg(&op->dev, "allocating spi_master struct\n");
- master = spi_alloc_master(&op->dev, sizeof *ms);
+ master = spi_alloc_master(&op->dev, sizeof(*ms));
if (!master) {
rc = -ENOMEM;
goto err_alloc;
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index 56d10c4511db..1668a347e003 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -105,7 +105,7 @@ static void npcm_pspi_set_mode(struct spi_device *spi)
u16 regtemp;
u16 mode_val;
- switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
mode_val = 0;
break;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 6e6c2403944d..a66fa97046ee 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1124,12 +1124,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- /* Clear potential interrupts */
- reg = fspi_readl(f, f->iobase + FSPI_INTR);
- if (reg)
- fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
-
/* find the resources - controller memory mapped space */
if (is_acpi_node(f->dev->fwnode))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1167,6 +1161,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
}
}
+ /* Clear potential interrupts */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ if (reg)
+ fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index f3843f0ff260..38c14c4e4e21 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -86,7 +86,7 @@ static int tiny_spi_setup(struct spi_device *spi)
hw->speed_hz = spi->max_speed_hz;
hw->baud = tiny_spi_baud(spi, hw->speed_hz);
}
- hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
+ hw->mode = spi->mode & SPI_MODE_X_MASK;
return 0;
}
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 7062f2902253..20b047172965 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -6,7 +6,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrjölä <juha.yrjola@nokia.com>
+ * Juha Yrjola <juha.yrjola@nokia.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -241,7 +241,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi,
else
word_len = spi->bits_per_word;
- if (spi->bits_per_word > 32)
+ if (word_len > 32)
return -EINVAL;
cs->word_len = word_len;
@@ -296,7 +296,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
break;
}
status = omap1_spi100k_setup_transfer(spi, t);
@@ -315,7 +314,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master,
m->actual_length += count;
if (count != t->len) {
- status = -EIO;
break;
}
}
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 71402f71ddd8..087172a193fa 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -330,7 +330,7 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (spi->mode & SPI_CPOL)
flags |= UWIRE_CLK_INVERTED;
- switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
case SPI_MODE_3:
flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
@@ -424,15 +424,22 @@ done:
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
+ bool initial_setup = false;
+ int status;
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
return -ENOMEM;
spi->controller_state = ust;
+ initial_setup = true;
}
- return uwire_setup_transfer(spi, NULL);
+ status = uwire_setup_transfer(spi, NULL);
+ if (status && initial_setup)
+ kfree(ust);
+
+ return status;
}
static void uwire_cleanup(struct spi_device *spi)
@@ -453,7 +460,7 @@ static int uwire_probe(struct platform_device *pdev)
struct uwire_spi *uwire;
int status;
- master = spi_alloc_master(&pdev->dev, sizeof *uwire);
+ master = spi_alloc_master(&pdev->dev, sizeof(*uwire));
if (!master)
return -ENODEV;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 999c22736416..60c9cdf1c94b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrjölä <juha.yrjola@nokia.com>
+ * Juha Yrjola <juha.yrjola@nokia.com>
*/
#include <linux/kernel.h>
@@ -1032,15 +1032,29 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
}
}
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs;
+
+ if (spi->controller_state) {
+ /* Unlink controller state from context save list */
+ cs = spi->controller_state;
+ list_del(&cs->node);
+
+ kfree(cs);
+ }
+}
+
static int omap2_mcspi_setup(struct spi_device *spi)
{
+ bool initial_setup = false;
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_cs *cs = spi->controller_state;
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
+ initial_setup = true;
}
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
+ if (initial_setup)
+ omap2_mcspi_cleanup(spi);
return ret;
}
ret = omap2_mcspi_setup_transfer(spi, NULL);
+ if (ret && initial_setup)
+ omap2_mcspi_cleanup(spi);
+
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return ret;
}
-static void omap2_mcspi_cleanup(struct spi_device *spi)
-{
- struct omap2_mcspi_cs *cs;
-
- if (spi->controller_state) {
- /* Unlink controller state from context save list */
- cs = spi->controller_state;
- list_del(&cs->node);
-
- kfree(cs);
- }
-}
-
static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
{
struct omap2_mcspi *mcspi = data;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 0c9e3f270f05..feebda66f56e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -288,7 +288,7 @@
#define SPI_POLLING_TIMEOUT 1000
/*
- * The type of reading going on on this chip
+ * The type of reading going on this chip
*/
enum ssp_reading {
READING_NULL,
@@ -298,7 +298,7 @@ enum ssp_reading {
};
/*
- * The type of writing going on on this chip
+ * The type of writing going on this chip
*/
enum ssp_writing {
WRITING_NULL,
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index d8ee363fb714..d65f047b6c82 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -34,7 +34,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
@@ -223,7 +223,7 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
}
if (cs == NULL) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
spi->controller_state = cs;
@@ -235,7 +235,7 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
*/
cs->mode = SPI_PPC4XX_MODE_SPE;
- switch (spi->mode & (SPI_CPHA | SPI_CPOL)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
cs->mode |= SPI_CLK_MODE0;
break;
@@ -326,7 +326,7 @@ static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
{
/*
* On all 4xx PPC's the SPI bus is shared/multiplexed with
- * the 2nd I2C bus. We need to enable the the SPI bus before
+ * the 2nd I2C bus. We need to enable the SPI bus before
* using it.
*/
@@ -349,7 +349,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
int ret;
const unsigned int *clk;
- master = spi_alloc_master(dev, sizeof *hw);
+ master = spi_alloc_master(dev, sizeof(*hw));
if (master == NULL)
return -ENOMEM;
master->dev.of_node = np;
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 37567bc7a523..be563f0dd03a 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -2,18 +2,18 @@
/*
* PXA2xx SPI DMA engine support.
*
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/pxa2xx_ssp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
-#include <linux/spi/spi.h>
+
#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
#include "spi-pxa2xx.h"
@@ -26,7 +26,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* It is possible that one CPU is handling ROR interrupt and other
* just gets DMA completion. Calling pump_transfers() twice for the
* same transfer leads to problems thus we prevent concurrent calls
- * by using ->dma_running.
+ * by using dma_running.
*/
if (atomic_dec_and_test(&drv_data->dma_running)) {
/*
@@ -34,25 +34,18 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
* might not know about the error yet. So we re-check the
* ROR bit here before we clear the status register.
*/
- if (!error) {
- u32 status = pxa2xx_spi_read(drv_data, SSSR)
- & drv_data->mask_sr;
- error = status & SSSR_ROR;
- }
+ if (!error)
+ error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;
/* Clear status & disable interrupts */
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~drv_data->dma_cr1);
+ clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
write_SSSR_CS(drv_data, drv_data->clear_sr);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
if (error) {
/* In case we got an error we disable the SSP now */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0)
- & ~SSCR0_SSE);
+ pxa_ssp_disable(drv_data->ssp);
msg->status = -EIO;
}
@@ -94,14 +87,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
- cfg.dst_addr = drv_data->ssdr_physical;
+ cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
cfg.dst_addr_width = width;
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
chan = drv_data->controller->dma_tx;
} else {
- cfg.src_addr = drv_data->ssdr_physical;
+ cfg.src_addr = drv_data->ssp->phys_base + SSDR;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
@@ -111,7 +104,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
ret = dmaengine_slave_config(chan, &cfg);
if (ret) {
- dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
+ dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
return NULL;
}
@@ -123,9 +116,9 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
- status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
+ status = read_SSSR_bits(drv_data, drv_data->mask_sr);
if (status & SSSR_ROR) {
- dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
+ dev_err(drv_data->ssp->dev, "FIFO overrun\n");
dmaengine_terminate_async(drv_data->controller->dma_rx);
dmaengine_terminate_async(drv_data->controller->dma_tx);
@@ -145,16 +138,14 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
if (!tx_desc) {
- dev_err(&drv_data->pdev->dev,
- "failed to get DMA TX descriptor\n");
+ dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
err = -EBUSY;
goto err_tx;
}
rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
if (!rx_desc) {
- dev_err(&drv_data->pdev->dev,
- "failed to get DMA RX descriptor\n");
+ dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
err = -EBUSY;
goto err_rx;
}
@@ -191,8 +182,8 @@ void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
- struct device *dev = &drv_data->pdev->dev;
struct spi_controller *controller = drv_data->controller;
+ struct device *dev = drv_data->ssp->dev;
dma_cap_mask_t mask;
dma_cap_zero(mask);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 1833f5876e9f..2e134eb4bd2c 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * CE4100's SPI device is more or less the same one as found on PXA
+ * PCI glue driver for SPI PXA2xx compatible controllers.
+ * CE4100's SPI device is more or less the same one as found on PXA.
*
- * Copyright (C) 2016, Intel Corporation
+ * Copyright (C) 2016, 2021 Intel Corporation
*/
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dmaengine.h>
@@ -178,7 +180,7 @@ static struct pxa_spi_info spi_info_configs[] = {
.rx_param = &bsw2_rx_param,
},
[PORT_MRFLD] = {
- .type = PXA27x_SSP,
+ .type = MRFLD_SSP,
.max_clk_rate = 25000000,
.setup = mrfld_spi_setup,
},
@@ -239,6 +241,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1;
ssp = &spi_pdata.ssp;
+ ssp->dev = &dev->dev;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
@@ -254,7 +257,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0,
c->max_clk_rate);
- if (IS_ERR(ssp->clk))
+ if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
memset(&pi, 0, sizeof(pi));
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 5e59ba075bc7..974e30744b83 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
*/
#include <linux/acpi.h>
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
@@ -25,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
+
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
@@ -38,11 +40,11 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define TIMOUT_DFLT 1000
/*
- * for testing SSCR1 changes that require SSP restart, basically
- * everything except the service and interrupt enables, the pxa270 developer
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables, the PXA270 developer
* manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
- * list, but the PXA255 dev man says all bits without really meaning the
- * service and interrupt enables
+ * list, but the PXA255 developer manual says all bits without really meaning
+ * the service and interrupt enables.
*/
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
@@ -198,6 +200,17 @@ static bool is_mmp2_ssp(const struct driver_data *drv_data)
return drv_data->ssp_type == MMP2_SSP;
}
+static bool is_mrfld_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == MRFLD_SSP;
+}
+
+static void pxa2xx_spi_update(const struct driver_data *drv_data, u32 reg, u32 mask, u32 value)
+{
+ if ((pxa2xx_spi_read(drv_data, reg) & mask) != value)
+ pxa2xx_spi_write(drv_data, reg, value & mask);
+}
+
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
@@ -239,7 +252,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
break;
}
- return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
+ return read_SSSR_bits(drv_data, mask) == mask;
}
static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -284,13 +297,11 @@ static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
case QUARK_X1000_SSP:
return clk_div
| QUARK_X1000_SSCR0_Motorola
- | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
- | SSCR0_SSE;
+ | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits);
default:
return clk_div
| SSCR0_Motorola
| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
- | SSCR0_SSE
| (bits > 16 ? SSCR0_EDSS : 0);
}
}
@@ -325,7 +336,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
u32 value;
config = lpss_get_config(drv_data);
- drv_data->lpss_base = drv_data->ioaddr + config->offset;
+ drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset;
/* Enable software chip select control */
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
@@ -421,7 +432,7 @@ static void cs_assert(struct spi_device *spi)
spi_controller_get_devdata(spi->controller);
if (drv_data->ssp_type == CE4100_SSP) {
- pxa2xx_spi_write(drv_data, SSSR, chip->frm);
+ pxa2xx_spi_write(drv_data, SSSR, spi->chip_select);
return;
}
@@ -430,11 +441,6 @@ static void cs_assert(struct spi_device *spi)
return;
}
- if (chip->gpiod_cs) {
- gpiod_set_value(chip->gpiod_cs, chip->gpio_cs_inverted);
- return;
- }
-
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, true);
}
@@ -460,11 +466,6 @@ static void cs_deassert(struct spi_device *spi)
return;
}
- if (chip->gpiod_cs) {
- gpiod_set_value(chip->gpiod_cs, !chip->gpio_cs_inverted);
- return;
- }
-
if (is_lpss_ssp(drv_data))
lpss_ssp_cs_control(spi, false);
}
@@ -482,7 +483,7 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
do {
- while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ while (read_SSSR_bits(drv_data, SSSR_RNE))
pxa2xx_spi_read(drv_data, SSDR);
} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
write_SSSR_CS(drv_data, SSSR_ROR);
@@ -496,8 +497,7 @@ static void pxa2xx_spi_off(struct driver_data *drv_data)
if (is_mmp2_ssp(drv_data))
return;
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa_ssp_disable(drv_data->ssp);
}
static int null_writer(struct driver_data *drv_data)
@@ -518,8 +518,7 @@ static int null_reader(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += n_bytes;
}
@@ -541,8 +540,7 @@ static int u8_writer(struct driver_data *drv_data)
static int u8_reader(struct driver_data *drv_data)
{
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
++drv_data->rx;
}
@@ -564,8 +562,7 @@ static int u16_writer(struct driver_data *drv_data)
static int u16_reader(struct driver_data *drv_data)
{
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 2;
}
@@ -587,8 +584,7 @@ static int u32_writer(struct driver_data *drv_data)
static int u32_reader(struct driver_data *drv_data)
{
- while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
- && (drv_data->rx < drv_data->rx_end)) {
+ while (read_SSSR_bits(drv_data, SSSR_RNE) && drv_data->rx < drv_data->rx_end) {
*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
drv_data->rx += 4;
}
@@ -618,47 +614,51 @@ static void reset_sccr1(struct driver_data *drv_data)
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}
-static void int_error_stop(struct driver_data *drv_data, const char *msg)
+static void int_stop_and_reset(struct driver_data *drv_data)
{
- /* Stop and reset SSP */
+ /* Clear and disable interrupts */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
+ if (pxa25x_ssp_comp(drv_data))
+ return;
+
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+}
+
+static void int_error_stop(struct driver_data *drv_data, const char *msg, int err)
+{
+ int_stop_and_reset(drv_data);
pxa2xx_spi_flush(drv_data);
pxa2xx_spi_off(drv_data);
- dev_err(&drv_data->pdev->dev, "%s\n", msg);
+ dev_err(drv_data->ssp->dev, "%s\n", msg);
- drv_data->controller->cur_msg->status = -EIO;
+ drv_data->controller->cur_msg->status = err;
spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
{
- /* Clear and disable interrupts */
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- reset_sccr1(drv_data);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
+ int_stop_and_reset(drv_data);
spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
- u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
- drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
+ u32 irq_status;
- u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
+ irq_status = read_SSSR_bits(drv_data, drv_data->mask_sr);
+ if (!(pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE))
+ irq_status &= ~SSSR_TFS;
if (irq_status & SSSR_ROR) {
- int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
+ int_error_stop(drv_data, "interrupt_transfer: FIFO overrun", -EIO);
return IRQ_HANDLED;
}
if (irq_status & SSSR_TUR) {
- int_error_stop(drv_data, "interrupt_transfer: fifo underrun");
+ int_error_stop(drv_data, "interrupt_transfer: FIFO underrun", -EIO);
return IRQ_HANDLED;
}
@@ -670,7 +670,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
}
}
- /* Drain rx fifo, Fill tx fifo and prevent overruns */
+ /* Drain Rx FIFO, Fill Tx FIFO and prevent overruns */
do {
if (drv_data->read(drv_data)) {
int_transfer_complete(drv_data);
@@ -691,8 +691,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
sccr1_reg &= ~SSCR1_TIE;
/*
- * PXA25x_SSP has no timeout, set up rx threshould for the
- * remaining RX bytes.
+ * PXA25x_SSP has no timeout, set up Rx threshold for
+ * the remaining Rx bytes.
*/
if (pxa25x_ssp_comp(drv_data)) {
u32 rx_thre;
@@ -725,14 +725,12 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
pxa2xx_spi_off(drv_data);
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
+ clear_SSCR1_bits(drv_data, drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
write_SSSR_CS(drv_data, drv_data->clear_sr);
- dev_err(&drv_data->pdev->dev,
- "bad message state in interrupt handler\n");
+ dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
}
static irqreturn_t ssp_int(int irq, void *dev_id)
@@ -748,7 +746,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
* the IRQ was not for us (we shouldn't be RPM suspended when the
* interrupt is enabled).
*/
- if (pm_runtime_suspended(&drv_data->pdev->dev))
+ if (pm_runtime_suspended(drv_data->ssp->dev))
return IRQ_NONE;
/*
@@ -916,7 +914,7 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
/*
* Calculate the divisor for the SCR (Serial Clock Rate), avoiding
- * that the SSP transmission rate can be greater than the device rate
+ * that the SSP transmission rate can be greater than the device rate.
*/
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
@@ -974,7 +972,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* Check if we can DMA this transfer */
if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
- /* reject already-mapped transfers; PIO won't always work */
+ /* Reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&spi->dev,
@@ -983,10 +981,10 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
return -EINVAL;
}
- /* warn ... we force this to PIO mode */
+ /* Warn ... we force this to PIO mode */
dev_warn_ratelimited(&spi->dev,
- "DMA disabled for transfer length %ld greater than %d\n",
- (long)transfer->len, MAX_DMA_LEN);
+ "DMA disabled for transfer length %u greater than %d\n",
+ transfer->len, MAX_DMA_LEN);
}
/* Setup the transfer state based on the type of transfer */
@@ -1028,8 +1026,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
u32_writer : null_writer;
}
/*
- * if bits/word is changed in dma mode, then must check the
- * thresholds and burst also
+ * If bits per word is changed in DMA mode, then we must
+ * check the thresholds and burst as well.
*/
if (chip->enable_dma) {
if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
@@ -1080,47 +1078,45 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
dma_mapped ? "DMA" : "PIO");
if (is_lpss_ssp(drv_data)) {
- if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
- != chip->lpss_rx_threshold)
- pxa2xx_spi_write(drv_data, SSIRF,
- chip->lpss_rx_threshold);
- if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
- != chip->lpss_tx_threshold)
- pxa2xx_spi_write(drv_data, SSITF,
- chip->lpss_tx_threshold);
+ pxa2xx_spi_update(drv_data, SSIRF, GENMASK(7, 0), chip->lpss_rx_threshold);
+ pxa2xx_spi_update(drv_data, SSITF, GENMASK(15, 0), chip->lpss_tx_threshold);
}
- if (is_quark_x1000_ssp(drv_data) &&
- (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
- pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
-
- /* see if we need to reload the config registers */
- if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
- || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
- != (cr1 & change_mask)) {
- /* stop the SSP, and update the other bits */
- if (!is_mmp2_ssp(drv_data))
- pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
- /* first set CR1 without interrupt and service enables */
- pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
- /* restart the SSP */
- pxa2xx_spi_write(drv_data, SSCR0, cr0);
+ if (is_mrfld_ssp(drv_data)) {
+ u32 mask = SFIFOTT_RFT | SFIFOTT_TFT;
+ u32 thresh = 0;
- } else {
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+ thresh |= SFIFOTT_RxThresh(chip->lpss_rx_threshold);
+ thresh |= SFIFOTT_TxThresh(chip->lpss_tx_threshold);
+
+ pxa2xx_spi_update(drv_data, SFIFOTT, mask, thresh);
}
+ if (is_quark_x1000_ssp(drv_data))
+ pxa2xx_spi_update(drv_data, DDS_RATE, GENMASK(23, 0), chip->dds_rate);
+
+ /* Stop the SSP */
+ if (!is_mmp2_ssp(drv_data))
+ pxa_ssp_disable(drv_data->ssp);
+
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+
+ /* First set CR1 without interrupt and service enables */
+ pxa2xx_spi_update(drv_data, SSCR1, change_mask, cr1);
+
+ /* See if we need to reload the configuration registers */
+ pxa2xx_spi_update(drv_data, SSCR0, GENMASK(31, 0), cr0);
+
+ /* Restart the SSP */
+ pxa_ssp_enable(drv_data->ssp);
+
if (is_mmp2_ssp(drv_data)) {
- u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
- & SSSR_TFL_MASK) >> 8;
+ u8 tx_level = read_SSSR_bits(drv_data, SSSR_TFL_MASK) >> 8;
if (tx_level) {
- /* On MMP2, flipping SSE doesn't to empty TXFIFO. */
- dev_warn(&spi->dev, "%d bytes of garbage in TXFIFO!\n",
- tx_level);
+ /* On MMP2, flipping SSE doesn't empty the Tx FIFO. */
+ dev_warn(&spi->dev, "%u bytes of garbage in Tx FIFO!\n", tx_level);
if (tx_level > transfer->len)
tx_level = transfer->len;
drv_data->tx += tx_level;
@@ -1139,7 +1135,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/*
* Release the data by enabling service requests and interrupts,
- * without changing any mode bits
+ * without changing any mode bits.
*/
pxa2xx_spi_write(drv_data, SSCR1, cr1);
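The LPSS/Merrifield/Quark threshold writes above go through pxa2xx_spi_update() with GENMASK()-built masks instead of the old open-coded read/compare/write. The helper itself is added elsewhere in this series and is not visible in this hunk; the snippet below is only a sketch of the masked read-compare-write idiom it presumably follows, under the hypothetical name ssp_update_bits():

static inline void ssp_update_bits(struct driver_data *drv_data,
				   u32 reg, u32 mask, u32 value)
{
	u32 old = pxa2xx_spi_read(drv_data, reg);

	/* Only touch the bits covered by the mask, and skip redundant writes */
	if ((old & mask) != (value & mask))
		pxa2xx_spi_write(drv_data, reg, (old & ~mask) | (value & mask));
}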
@@ -1150,18 +1146,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
- /* Stop and reset SSP */
- write_SSSR_CS(drv_data, drv_data->clear_sr);
- reset_sccr1(drv_data);
- if (!pxa25x_ssp_comp(drv_data))
- pxa2xx_spi_write(drv_data, SSTO, 0);
- pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_off(drv_data);
-
- dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
-
- drv_data->controller->cur_msg->status = -EINTR;
- spi_finalize_current_transfer(drv_data->controller);
+ int_error_stop(drv_data, "transfer aborted", -EINTR);
return 0;
}
@@ -1175,9 +1160,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
- pxa2xx_spi_write(drv_data, SSCR1,
- pxa2xx_spi_read(drv_data, SSCR1)
- & ~(drv_data->int_cr1 | drv_data->dma_cr1));
+ clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
@@ -1202,61 +1185,61 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
return 0;
}
+static void cleanup_cs(struct spi_device *spi)
+{
+ if (!gpio_is_valid(spi->cs_gpio))
+ return;
+
+ gpio_free(spi->cs_gpio);
+ spi->cs_gpio = -ENOENT;
+}
+
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
struct pxa2xx_spi_chip *chip_info)
{
- struct driver_data *drv_data =
- spi_controller_get_devdata(spi->controller);
- struct gpio_desc *gpiod;
- int err = 0;
+ struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
if (chip == NULL)
return 0;
- if (drv_data->cs_gpiods) {
- gpiod = drv_data->cs_gpiods[spi->chip_select];
- if (gpiod) {
- chip->gpiod_cs = gpiod;
- chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
- gpiod_set_value(gpiod, chip->gpio_cs_inverted);
- }
-
+ if (chip_info == NULL)
return 0;
- }
- if (chip_info == NULL)
+ if (drv_data->ssp_type == CE4100_SSP)
return 0;
- /* NOTE: setup() can be called multiple times, possibly with
- * different chip_info, release previously requested GPIO
+ /*
+ * NOTE: setup() can be called multiple times, possibly with
+ * different chip_info; release the previously requested GPIO.
*/
- if (chip->gpiod_cs) {
- gpiod_put(chip->gpiod_cs);
- chip->gpiod_cs = NULL;
- }
+ cleanup_cs(spi);
- /* If (*cs_control) is provided, ignore GPIO chip select */
+ /* If ->cs_control() is provided, ignore GPIO chip select */
if (chip_info->cs_control) {
chip->cs_control = chip_info->cs_control;
return 0;
}
if (gpio_is_valid(chip_info->gpio_cs)) {
- err = gpio_request(chip_info->gpio_cs, "SPI_CS");
+ int gpio = chip_info->gpio_cs;
+ int err;
+
+ err = gpio_request(gpio, "SPI_CS");
if (err) {
- dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
- chip_info->gpio_cs);
+ dev_err(&spi->dev, "failed to request chip select GPIO%d\n", gpio);
return err;
}
- gpiod = gpio_to_desc(chip_info->gpio_cs);
- chip->gpiod_cs = gpiod;
- chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
+ err = gpio_direction_output(gpio, !(spi->mode & SPI_CS_HIGH));
+ if (err) {
+ gpio_free(gpio);
+ return err;
+ }
- err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
+ spi->cs_gpio = gpio;
}
- return err;
+ return 0;
}
static int setup(struct spi_device *spi)
@@ -1267,6 +1250,7 @@ static int setup(struct spi_device *spi)
struct driver_data *drv_data =
spi_controller_get_devdata(spi->controller);
uint tx_thres, tx_hi_thres, rx_thres;
+ int err;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1274,6 +1258,11 @@ static int setup(struct spi_device *spi)
tx_hi_thres = 0;
rx_thres = RX_THRESH_QUARK_X1000_DFLT;
break;
+ case MRFLD_SSP:
+ tx_thres = TX_THRESH_MRFLD_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_MRFLD_DFLT;
+ break;
case CE4100_SSP:
tx_thres = TX_THRESH_CE4100_DFLT;
tx_hi_thres = 0;
@@ -1302,7 +1291,7 @@ static int setup(struct spi_device *spi)
break;
}
- /* Only alloc on first setup */
+ /* Only allocate on the first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
@@ -1316,15 +1305,15 @@ static int setup(struct spi_device *spi)
kfree(chip);
return -EINVAL;
}
-
- chip->frm = spi->chip_select;
}
chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
- /* protocol drivers may change the chip settings, so...
- * if chip_info exists, use it */
+ /*
+ * Protocol drivers may change the chip settings, so...
+ * if chip_info exists, use it.
+ */
chip_info = spi->controller_data;
/* chip_info isn't always needed */
@@ -1349,15 +1338,24 @@ static int setup(struct spi_device *spi)
chip->cr1 |= SSCR1_SPH;
}
- chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
- chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
- | SSITF_TxHiThresh(tx_hi_thres);
+ if (is_lpss_ssp(drv_data)) {
+ chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
+ chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres) |
+ SSITF_TxHiThresh(tx_hi_thres);
+ }
+
+ if (is_mrfld_ssp(drv_data)) {
+ chip->lpss_rx_threshold = rx_thres;
+ chip->lpss_tx_threshold = tx_thres;
+ }
- /* set dma burst and threshold outside of chip_info path so that if
- * chip_info goes away after setting chip->enable_dma, the
- * burst and threshold can still respond to changes in bits_per_word */
+ /*
+ * Set DMA burst and threshold outside of chip_info path so that if
+ * chip_info goes away after setting chip->enable_dma, the burst and
+ * threshold can still respond to changes in bits_per_word.
+ */
if (chip->enable_dma) {
- /* set up legal burst and threshold for dma */
+ /* Set up legal burst and threshold for DMA */
if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
spi->bits_per_word,
&chip->dma_burst_size,
@@ -1388,8 +1386,8 @@ static int setup(struct spi_device *spi)
}
chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
- chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
- | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
+ chip->cr1 |= ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) |
+ ((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0);
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
@@ -1413,22 +1411,18 @@ static int setup(struct spi_device *spi)
if (drv_data->ssp_type == CE4100_SSP)
return 0;
- return setup_cs(spi, chip, chip_info);
+ err = setup_cs(spi, chip, chip_info);
+ if (err)
+ kfree(chip);
+
+ return err;
}
static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
- struct driver_data *drv_data =
- spi_controller_get_devdata(spi->controller);
-
- if (!chip)
- return;
-
- if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
- chip->gpiod_cs)
- gpiod_put(chip->gpiod_cs);
+ cleanup_cs(spi);
kfree(chip);
}
@@ -1645,7 +1639,7 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
- if (has_acpi_companion(&drv_data->pdev->dev)) {
+ if (has_acpi_companion(drv_data->ssp->dev)) {
switch (drv_data->ssp_type) {
/*
* For Atoms the ACPI DeviceSelection used by the Windows
@@ -1677,7 +1671,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
- int status, count;
+ int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
@@ -1694,7 +1688,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
ssp = &platform_info->ssp;
if (!ssp->mmio_base) {
- dev_err(&pdev->dev, "failed to get ssp\n");
+ dev_err(&pdev->dev, "failed to get SSP\n");
return -ENODEV;
}
@@ -1705,17 +1699,18 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
- pxa_ssp_free(ssp);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto out_error_controller_alloc;
}
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
drv_data->controller_info = platform_info;
- drv_data->pdev = pdev;
drv_data->ssp = ssp;
- controller->dev.of_node = pdev->dev.of_node;
- /* the spi->mode bits understood by this driver: */
+ controller->dev.of_node = dev->of_node;
+ controller->dev.fwnode = dev->fwnode;
+
+ /* The spi->mode bits understood by this driver: */
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
controller->bus_num = ssp->port_id;
@@ -1733,8 +1728,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ssp_type = ssp->type;
- drv_data->ioaddr = ssp->mmio_base;
- drv_data->ssdr_physical = ssp->phys_base + SSDR;
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
@@ -1796,15 +1789,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
controller->min_speed_hz =
DIV_ROUND_UP(controller->max_speed_hz, 512);
+ pxa_ssp_disable(ssp);
+
/* Load default SSP configuration */
- pxa2xx_spi_write(drv_data, SSCR0, 0);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
pxa2xx_spi_write(drv_data, SSCR1, tmp);
- /* using the Motorola SPI protocol and use 8 bit frame */
+ /* Use the Motorola SPI protocol and 8-bit frames */
tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@@ -1856,38 +1850,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
}
controller->num_chipselect = platform_info->num_chipselect;
-
- count = gpiod_count(&pdev->dev, "cs");
- if (count > 0) {
- int i;
-
- controller->num_chipselect = max_t(int, count,
- controller->num_chipselect);
-
- drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
- controller->num_chipselect, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!drv_data->cs_gpiods) {
- status = -ENOMEM;
- goto out_error_clock_enabled;
- }
-
- for (i = 0; i < controller->num_chipselect; i++) {
- struct gpio_desc *gpiod;
-
- gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
- if (IS_ERR(gpiod)) {
- /* Means use native chip select */
- if (PTR_ERR(gpiod) == -ENOENT)
- continue;
-
- status = PTR_ERR(gpiod);
- goto out_error_clock_enabled;
- } else {
- drv_data->cs_gpiods[i] = gpiod;
- }
- }
- }
+ controller->use_gpio_descriptors = true;
if (platform_info->is_slave) {
drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
@@ -1906,8 +1869,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = spi_register_controller(controller);
- if (status != 0) {
- dev_err(&pdev->dev, "problem registering spi controller\n");
+ if (status) {
+ dev_err(&pdev->dev, "problem registering SPI controller\n");
goto out_error_pm_runtime_enabled;
}
@@ -1938,7 +1901,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
spi_unregister_controller(drv_data->controller);
/* Disable the SSP at the peripheral and SOC level */
- pxa2xx_spi_write(drv_data, SSCR0, 0);
+ pxa_ssp_disable(ssp);
clk_disable_unprepare(ssp->clk);
/* Release DMA */
@@ -1965,9 +1928,10 @@ static int pxa2xx_spi_suspend(struct device *dev)
int status;
status = spi_controller_suspend(drv_data->controller);
- if (status != 0)
+ if (status)
return status;
- pxa2xx_spi_write(drv_data, SSCR0, 0);
+
+ pxa_ssp_disable(ssp);
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(ssp->clk);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 1400472bc986..9a20fb88e50f 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -1,28 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
*/
#ifndef SPI_PXA2XX_H
#define SPI_PXA2XX_H
-#include <linux/atomic.h>
-#include <linux/dmaengine.h>
-#include <linux/errno.h>
-#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/pxa2xx_ssp.h>
-#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/sizes.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/pxa2xx_spi.h>
-struct driver_data {
- /* Driver model hookup */
- struct platform_device *pdev;
+#include <linux/pxa2xx_ssp.h>
+
+struct gpio_desc;
+struct pxa2xx_spi_controller;
+struct spi_controller;
+struct spi_device;
+struct spi_transfer;
+struct driver_data {
/* SSP Info */
struct ssp_device *ssp;
@@ -33,10 +31,6 @@ struct driver_data {
/* PXA hookup */
struct pxa2xx_spi_controller *controller_info;
- /* SSP register addresses */
- void __iomem *ioaddr;
- phys_addr_t ssdr_physical;
-
/* SSP masks*/
u32 dma_cr1;
u32 int_cr1;
@@ -59,9 +53,6 @@ struct driver_data {
void __iomem *lpss_base;
- /* GPIOs for chip selects */
- struct gpio_desc **cs_gpiods;
-
/* Optional slave FIFO ready signal */
struct gpio_desc *gpiod_ready;
};
@@ -71,37 +62,32 @@ struct chip_data {
u32 dds_rate;
u32 timeout;
u8 n_bytes;
+ u8 enable_dma;
u32 dma_burst_size;
- u32 threshold;
u32 dma_threshold;
+ u32 threshold;
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
- u8 enable_dma;
- union {
- struct gpio_desc *gpiod_cs;
- unsigned int frm;
- };
- int gpio_cs_inverted;
+
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
+
void (*cs_control)(u32 command);
};
-static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
- unsigned reg)
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, u32 reg)
{
- return __raw_readl(drv_data->ioaddr + reg);
+ return pxa_ssp_read_reg(drv_data->ssp, reg);
}
-static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
- unsigned reg, u32 val)
+static inline void pxa2xx_spi_write(const struct driver_data *drv_data, u32 reg, u32 val)
{
- __raw_writel(val, drv_data->ioaddr + reg);
+ pxa_ssp_write_reg(drv_data->ssp, reg, val);
}
#define DMA_ALIGNMENT 8
-static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
+static inline int pxa25x_ssp_comp(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
case PXA25x_SSP:
@@ -113,11 +99,21 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
}
}
-static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
+static inline void clear_SSCR1_bits(const struct driver_data *drv_data, u32 bits)
+{
+ pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~bits);
+}
+
+static inline u32 read_SSSR_bits(const struct driver_data *drv_data, u32 bits)
+{
+ return pxa2xx_spi_read(drv_data, SSSR) & bits;
+}
+
+static inline void write_SSSR_CS(const struct driver_data *drv_data, u32 val)
{
if (drv_data->ssp_type == CE4100_SSP ||
drv_data->ssp_type == QUARK_X1000_SSP)
- val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
+ val |= read_SSSR_bits(drv_data, SSSR_ALT_FRM_MASK);
pxa2xx_spi_write(drv_data, SSSR, val);
}
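With the ioaddr and ssdr_physical fields gone, register access funnels through pxa_ssp_read_reg()/pxa_ssp_write_reg() from <linux/pxa2xx_ssp.h>. Judging from the __raw_readl()/__raw_writel() calls they replace, these are presumably thin MMIO wrappers around ssp->mmio_base, roughly as follows (a sketch, not the header's verbatim code):

static inline u32 pxa_ssp_read_reg(struct ssp_device *ssp, u32 reg)
{
	return __raw_readl(ssp->mmio_base + reg);
}

static inline void pxa_ssp_write_reg(struct ssp_device *ssp, u32 reg, u32 val)
{
	__raw_writel(val, ssp->mmio_base + reg);
}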
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 52d6259d96ed..540861ca2ba3 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -107,6 +107,8 @@
#define CR0_OPM_MASTER 0x0
#define CR0_OPM_SLAVE 0x1
+#define CR0_SOI_OFFSET 23
+
#define CR0_MTM_OFFSET 0x21
/* Bit fields in SER, 2bit */
@@ -116,13 +118,14 @@
#define BAUDR_SCKDV_MIN 2
#define BAUDR_SCKDV_MAX 65534
-/* Bit fields in SR, 5bit */
-#define SR_MASK 0x1f
+/* Bit fields in SR, 6bit */
+#define SR_MASK 0x3f
#define SR_BUSY (1 << 0)
#define SR_TF_FULL (1 << 1)
#define SR_TF_EMPTY (1 << 2)
#define SR_RF_EMPTY (1 << 3)
#define SR_RF_FULL (1 << 4)
+#define SR_SLAVE_TX_BUSY (1 << 5)
/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK 0x1f
@@ -156,7 +159,8 @@
*/
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
-#define ROCKCHIP_SPI_MAX_CS_NUM 2
+/* 2 for native cs, 2 for cs-gpio */
+#define ROCKCHIP_SPI_MAX_CS_NUM 4
#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
@@ -197,13 +201,19 @@ static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
-static inline void wait_for_idle(struct rockchip_spi *rs)
+static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5);
do {
- if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
- return;
+ if (slave_mode) {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
+ !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
+ return;
+ } else {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
+ return;
+ }
} while (!time_after(jiffies, timeout));
dev_warn(rs->dev, "spi controller is in busy state!\n");
@@ -228,7 +238,7 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
struct spi_controller *ctlr = spi->controller;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
- bool cs_asserted = !enable;
+ bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
/* Return immediately for no-op */
if (cs_asserted == rs->cs_asserted[spi->chip_select])
@@ -238,11 +248,15 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
/* Keep things powered as long as CS is asserted */
pm_runtime_get_sync(rs->dev);
- ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
- BIT(spi->chip_select));
+ if (spi->cs_gpiod)
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
+ else
+ ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
} else {
- ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
- BIT(spi->chip_select));
+ if (spi->cs_gpiod)
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
+ else
+ ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
/* Drop reference from when we first asserted CS */
pm_runtime_put(rs->dev);
@@ -383,7 +397,7 @@ static void rockchip_spi_dma_txcb(void *data)
return;
/* Wait until the FIFO has drained completely. */
- wait_for_idle(rs);
+ wait_for_tx_idle(rs, ctlr->slave);
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
@@ -495,6 +509,8 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
+ if (spi->mode & SPI_CS_HIGH)
+ cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
@@ -540,12 +556,12 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
* interrupt exactly when the fifo is full doesn't seem to work,
* so we need the strict inequality here
*/
- if (xfer->len < rs->fifo_len)
- writel_relaxed(xfer->len - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+ if ((xfer->len / rs->n_bytes) < rs->fifo_len)
+ writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
- writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
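The Rx FIFO threshold is counted in frames (rs->n_bytes-wide words), not in bytes, which is why xfer->len is now divided by rs->n_bytes before it is written to RXFTLR. For a hypothetical 24-byte transfer at 16 bits per word:

	/* bits_per_word = 16  =>  rs->n_bytes = 2 */
	frames = xfer->len / rs->n_bytes;	/* 24 / 2 = 12 frames     */
	RXFTLR = frames - 1;			/* 11, not 23 as before   */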
@@ -783,6 +799,14 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->can_dma = rockchip_spi_can_dma;
}
+ switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
+ case ROCKCHIP_SPI_VER2_TYPE2:
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ break;
+ default:
+ break;
+ }
+
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register controller\n");
@@ -922,6 +946,7 @@ static const struct of_device_id rockchip_spi_dt_match[] = {
{ .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
{ .compatible = "rockchip,rv1108-spi", },
+ { .compatible = "rockchip,rv1126-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index e39fd38f5180..d16ed88802d3 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -618,9 +618,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = -ETIMEDOUT;
}
if (tx)
- dmaengine_terminate_all(rspi->ctlr->dma_tx);
+ dmaengine_terminate_sync(rspi->ctlr->dma_tx);
if (rx)
- dmaengine_terminate_all(rspi->ctlr->dma_rx);
+ dmaengine_terminate_sync(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@@ -634,7 +634,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
- dmaengine_terminate_all(rspi->ctlr->dma_rx);
+ dmaengine_terminate_sync(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
dev_warn_once(&rspi->ctlr->dev,
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 297c512069a5..5d27ee482237 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -174,7 +174,7 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- if (t && t->len + tlen > SC18IS602_BUFSIZ)
+ if (t && t->len + tlen > SC18IS602_BUFSIZ + 1)
return -EINVAL;
return 0;
@@ -219,6 +219,11 @@ static int sc18is602_transfer_one(struct spi_master *master,
return status;
}
+static size_t sc18is602_max_transfer_size(struct spi_device *spi)
+{
+ return SC18IS602_BUFSIZ;
+}
+
static int sc18is602_setup(struct spi_device *spi)
{
struct sc18is602 *hw = spi_master_get_devdata(spi->master);
@@ -293,6 +298,8 @@ static int sc18is602_probe(struct i2c_client *client,
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
+ master->max_transfer_size = sc18is602_max_transfer_size;
+ master->max_message_size = sc18is602_max_transfer_size;
master->dev.of_node = np;
master->min_speed_hz = hw->freq / 128;
master->max_speed_hz = hw->freq / 4;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 41ed9ff8fad0..f88d9acd20d9 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -853,10 +853,10 @@ stop_reset:
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
- dmaengine_terminate_all(p->ctlr->dma_tx);
+ dmaengine_terminate_sync(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
- dmaengine_terminate_all(p->ctlr->dma_rx);
+ dmaengine_terminate_sync(p->ctlr->dma_rx);
sh_msiof_write(p, SIIER, 0);
return ret;
}
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index b41a75749b49..28e70db9bbba 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -1068,6 +1068,7 @@ static const struct of_device_id sprd_spi_of_match[] = {
{ .compatible = "sprd,sc9860-spi", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
static struct platform_driver sprd_spi_driver = {
.driver = {
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 7e640ccc7e77..27f35aa2d746 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -36,6 +36,7 @@
#define CR_FTIE BIT(18)
#define CR_SMIE BIT(19)
#define CR_TOIE BIT(20)
+#define CR_APMS BIT(22)
#define CR_PRESC_MASK GENMASK(31, 24)
#define QSPI_DCR 0x04
@@ -53,6 +54,7 @@
#define QSPI_FCR 0x0c
#define FCR_CTEF BIT(0)
#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
#define QSPI_DLR 0x10
@@ -91,7 +93,6 @@
#define STM32_AUTOSUSPEND_DELAY -1
struct stm32_qspi_flash {
- struct stm32_qspi *qspi;
u32 cs;
u32 presc;
};
@@ -107,6 +108,7 @@ struct stm32_qspi {
u32 clk_rate;
struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
struct completion data_completion;
+ struct completion match_completion;
u32 fmode;
struct dma_chan *dma_chtx;
@@ -115,6 +117,7 @@ struct stm32_qspi {
u32 cr_reg;
u32 dcr_reg;
+ unsigned long status_timeout;
/*
* to protect device configuration, could be different between
@@ -128,11 +131,20 @@ static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
u32 cr, sr;
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
sr = readl_relaxed(qspi->io_base + QSPI_SR);
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ complete(&qspi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
if (sr & (SR_TEF | SR_TCF)) {
/* disable irq */
- cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_TCIE & ~CR_TEIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->data_completion);
@@ -294,7 +306,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
int err = 0;
if (!op->data.nbytes)
- return stm32_qspi_wait_nobusy(qspi);
+ goto wait_nobusy;
if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
goto out;
@@ -315,10 +327,31 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
out:
/* clear flags */
writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+ if (!err)
+ err = stm32_qspi_wait_nobusy(qspi);
return err;
}
+static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ u32 cr;
+
+ reinit_completion(&qspi->match_completion);
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ writel_relaxed(cr | CR_SMIE, qspi->io_base + QSPI_CR);
+
+ if (!wait_for_completion_timeout(&qspi->match_completion,
+ msecs_to_jiffies(qspi->status_timeout)))
+ return -ETIMEDOUT;
+
+ writel_relaxed(FCR_CSMF, qspi->io_base + QSPI_FCR);
+
+ return 0;
+}
+
static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
{
if (buswidth == 4)
@@ -332,7 +365,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
u32 ccr, cr;
- int timeout, err = 0;
+ int timeout, err = 0, err_poll_status = 0;
dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
@@ -378,6 +411,9 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
+ if (qspi->fmode == CCR_FMODE_APM)
+ err_poll_status = stm32_qspi_wait_poll_status(qspi, op);
+
err = stm32_qspi_tx(qspi, op);
/*
@@ -387,7 +423,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
* byte of the device (device size - FIFO size). Since the device size is
* not known, the prefetching is always stopped.
*/
- if (err || qspi->fmode == CCR_FMODE_MM)
+ if (err || err_poll_status || qspi->fmode == CCR_FMODE_MM)
goto abort;
/* wait end of tx in indirect mode */
@@ -406,15 +442,49 @@ abort:
cr, !(cr & CR_ABORT), 1,
STM32_ABT_TIMEOUT_US);
- writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);
+ writel_relaxed(FCR_CTCF | FCR_CSMF, qspi->io_base + QSPI_FCR);
- if (err || timeout)
- dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
- __func__, err, timeout);
+ if (err || err_poll_status || timeout)
+ dev_err(qspi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
return err;
}
+static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ if (!spi_mem_supports_op(mem, op))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(qspi->dev);
+ return ret;
+ }
+
+ mutex_lock(&qspi->lock);
+
+ writel_relaxed(mask, qspi->io_base + QSPI_PSMKR);
+ writel_relaxed(match, qspi->io_base + QSPI_PSMAR);
+ qspi->fmode = CCR_FMODE_APM;
+ qspi->status_timeout = timeout_ms;
+
+ ret = stm32_qspi_send(mem, op);
+ mutex_unlock(&qspi->lock);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
+
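stm32_qspi_wait_poll_status() unmasks the status-match interrupt (CR_SMIE) and sleeps on match_completion; the interrupt handler added above completes it when SR_SMF is set, and the waiter then acknowledges the event by writing FCR_CSMF. A minimal, driver-agnostic sketch of that interrupt-plus-completion wait, using hypothetical register and structure names:

static int wait_for_status_match(struct my_ctrl *ctrl, unsigned long timeout_ms)
{
	u32 cr;

	reinit_completion(&ctrl->match_completion);

	/* Unmask the status-match interrupt */
	cr = readl_relaxed(ctrl->base + MY_CR);
	writel_relaxed(cr | MY_CR_SMIE, ctrl->base + MY_CR);

	/* The IRQ handler masks MY_CR_SMIE again and calls complete() */
	if (!wait_for_completion_timeout(&ctrl->match_completion,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	/* Acknowledge the match flag */
	writel_relaxed(MY_FCR_CSMF, ctrl->base + MY_FCR);
	return 0;
}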
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
@@ -522,12 +592,11 @@ static int stm32_qspi_setup(struct spi_device *spi)
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
flash = &qspi->flash[spi->chip_select];
- flash->qspi = qspi;
flash->cs = spi->chip_select;
flash->presc = presc;
mutex_lock(&qspi->lock);
- qspi->cr_reg = 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+ qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
@@ -607,6 +676,7 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
.exec_op = stm32_qspi_exec_op,
.dirmap_create = stm32_qspi_dirmap_create,
.dirmap_read = stm32_qspi_dirmap_read,
+ .poll_status = stm32_qspi_poll_status,
};
static int stm32_qspi_probe(struct platform_device *pdev)
@@ -661,6 +731,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
}
init_completion(&qspi->data_completion);
+ init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk)) {
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index cc8401980125..23ad052528db 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -379,6 +379,10 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
}
sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
+ /* Finally enable the bus - doing so before might raise SCK to HIGH */
+ reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG);
+ reg |= SUN6I_GBL_CTL_BUS_ENABLE;
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
/* Setup the transfer now... */
if (sspi->tx_buf)
@@ -504,7 +508,7 @@ static int sun6i_spi_runtime_resume(struct device *dev)
}
sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
- SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+ SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
return 0;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index a2e5907276e7..5131141bbf0d 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1071,8 +1071,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
ret = wait_for_completion_timeout(&tspi->xfer_completion,
SPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
- dev_err(tspi->dev,
- "spi transfer timeout, err %d\n", ret);
+ dev_err(tspi->dev, "spi transfer timeout\n");
if (tspi->is_curr_dma_xfer &&
(tspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tspi->tx_dma_chan);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index f7c832fd4003..6a726c95ac7a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1118,6 +1118,11 @@ static int tegra_slink_probe(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+
tspi->def_command_reg = SLINK_M_S;
tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 2f806f4b2c34..2354ca1e3858 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1028,7 +1028,7 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master, struct spi
ret = wait_for_completion_timeout(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
- dev_err(tqspi->dev, "transfer timeout: %d\n", ret);
+ dev_err(tqspi->dev, "transfer timeout\n");
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tqspi->tx_dma_chan);
if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index b8870784fc6e..8c4615b76339 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -580,8 +580,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
if (data->pkt_tx_buff != NULL) {
data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
- if (!data->pkt_rx_buff)
+ if (!data->pkt_rx_buff) {
kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
}
if (!data->pkt_rx_buff) {
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 6a9ef8ee3cc9..8900e51e1a1c 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -142,7 +142,7 @@ static void uniphier_spi_set_mode(struct spi_device *spi)
* FSTRT start frame timing
* 0: rising edge of clock, 1: falling edge of clock
*/
- switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ switch (spi->mode & SPI_MODE_X_MASK) {
case SPI_MODE_0:
/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 5d8a5ee62fa2..9262c6418463 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -367,7 +367,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
}
/**
- * zynq_qspi_setup - Configure the QSPI controller
+ * zynq_qspi_setup_op - Configure the QSPI controller
* @spi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer, baud
@@ -528,18 +528,17 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
int err = 0, i;
u8 *tmpbuf;
- u8 opcode = op->cmd.opcode;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi);
- if (op->cmd.nbytes) {
+ if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = &opcode;
+ xqspi->txbuf = (u8 *)&op->cmd.opcode;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->cmd.nbytes;
xqspi->rx_bytes = op->cmd.nbytes;
@@ -679,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
- goto remove_master;
+ goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
- goto remove_master;
+ goto clk_dis_all;
}
ret = of_property_read_u32(np, "num-cs",
@@ -694,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
if (ret < 0) {
ctlr->num_chipselect = 1;
} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+ ret = -EINVAL;
dev_err(&pdev->dev, "only 2 chip selects are available\n");
- goto remove_master;
+ goto clk_dis_all;
} else {
ctlr->num_chipselect = num_cs;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ba425b9c7700..c99181165321 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -47,10 +47,6 @@ static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- /* spi controllers may cleanup for released devices */
- if (spi->controller->cleanup)
- spi->controller->cleanup(spi);
-
spi_controller_put(spi->controller);
kfree(spi->driver_override);
kfree(spi);
@@ -367,6 +363,10 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
const struct spi_device *spi = to_spi_device(dev);
int rc;
+ rc = of_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
@@ -558,49 +558,29 @@ static int spi_dev_check(struct device *dev, void *data)
return 0;
}
-/**
- * spi_add_device - Add spi_device allocated with spi_alloc_device
- * @spi: spi_device to register
- *
- * Companion function to spi_alloc_device. Devices allocated with
- * spi_alloc_device can be added onto the spi bus with this function.
- *
- * Return: 0 on success; negative errno on failure
- */
-int spi_add_device(struct spi_device *spi)
+static void spi_cleanup(struct spi_device *spi)
+{
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
+}
+
+static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
int status;
- /* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
- ctlr->num_chipselect);
- return -EINVAL;
- }
-
- /* Set the bus ID string */
- spi_dev_set_name(spi);
-
- /* We need to make sure there's no other device with this
- * chipselect **BEFORE** we call setup(), else we'll trash
- * its configuration. Lock against concurrent add() calls.
- */
- mutex_lock(&spi_add_lock);
-
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
- goto done;
+ return status;
}
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
!device_is_registered(&ctlr->dev)) {
- status = -ENODEV;
- goto done;
+ return -ENODEV;
}
/* Descriptors take precedence */
@@ -617,23 +597,77 @@ int spi_add_device(struct spi_device *spi)
if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status);
- goto done;
+ return status;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0)
+ if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
- else
+ spi_cleanup(spi);
+ } else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+ }
+
+ return status;
+}
+
+/**
+ * spi_add_device - Add spi_device allocated with spi_alloc_device
+ * @spi: spi_device to register
+ *
+ * Companion function to spi_alloc_device. Devices allocated with
+ * spi_alloc_device can be added onto the spi bus with this function.
+ *
+ * Return: 0 on success; negative errno on failure
+ */
+int spi_add_device(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi->chip_select >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
-done:
+ /* We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration. Lock against concurrent add() calls.
+ */
+ mutex_lock(&spi_add_lock);
+ status = __spi_add_device(spi);
mutex_unlock(&spi_add_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
+static int spi_add_device_locked(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi->chip_select >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
+ WARN_ON(!mutex_is_locked(&spi_add_lock));
+ return __spi_add_device(spi);
+}
+
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@@ -717,7 +751,9 @@ void spi_unregister_device(struct spi_device *spi)
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
- device_unregister(&spi->dev);
+ device_del(&spi->dev);
+ spi_cleanup(spi);
+ put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -798,6 +834,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
+ trace_spi_set_cs(spi, activate);
+
spi->controller->last_cs_enable = enable;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
@@ -814,15 +852,29 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
if (!(spi->mode & SPI_NO_CS)) {
- if (spi->cs_gpiod)
- /* polarity handled by gpiolib */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
- else
+ if (spi->cs_gpiod) {
+ /*
+ * Historically ACPI has no means of expressing the GPIO polarity,
+ * so the SPISerialBus() resource defines it on a per-chip
+ * basis. In order to avoid a chain of negations, the GPIO
+ * polarity is considered to be Active High. Even for the cases
+ * when _DSD() is involved (in the updated versions of ACPI)
+ * the GPIO CS polarity must be defined Active High to avoid
+ * ambiguity. That's why we use enable, which takes SPI_CS_HIGH
+ * into account.
+ */
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, activate);
+ } else {
/*
* invert the enable line, as active low is
* default for SPI.
*/
gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -941,11 +993,15 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
if (ctlr->dma_tx)
tx_dev = ctlr->dma_tx->device->dev;
+ else if (ctlr->dma_map_dev)
+ tx_dev = ctlr->dma_map_dev;
else
tx_dev = ctlr->dev.parent;
if (ctlr->dma_rx)
rx_dev = ctlr->dma_rx->device->dev;
+ else if (ctlr->dma_map_dev)
+ rx_dev = ctlr->dma_map_dev;
else
rx_dev = ctlr->dev.parent;
@@ -1112,10 +1168,20 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
if (!speed_hz)
speed_hz = 100000;
- ms = 8LL * 1000LL * xfer->len;
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since speed is defined in Hz and we want milliseconds,
+ * use respective multiplier, but before the division,
+ * otherwise we may get 0 for short transfers.
+ */
+ ms = 8LL * MSEC_PER_SEC * xfer->len;
do_div(ms, speed_hz);
- ms += ms + 200; /* some tolerance */
+ /*
+ * Double it and add 200 ms of tolerance; use the
+ * predefined maximum in case of overflow.
+ */
+ ms += ms + 200;
if (ms > UINT_MAX)
ms = UINT_MAX;
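With the MSEC_PER_SEC multiplier applied before the division, the resulting wait budget for a hypothetical 4096-byte transfer clocked at 100 kHz works out to:

	ms = 8 * 1000 * 4096 / 100000;	/* = 32768000 / 100000 = 327 ms */
	ms += ms + 200;			/* = 854 ms total wait budget   */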
@@ -1138,10 +1204,10 @@ static void _spi_transfer_delay_ns(u32 ns)
{
if (!ns)
return;
- if (ns <= 1000) {
+ if (ns <= NSEC_PER_USEC) {
ndelay(ns);
} else {
- u32 us = DIV_ROUND_UP(ns, 1000);
+ u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
if (us <= 10)
udelay(us);
@@ -1161,21 +1227,25 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
switch (unit) {
case SPI_DELAY_UNIT_USECS:
- delay *= 1000;
+ delay *= NSEC_PER_USEC;
break;
- case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
+ case SPI_DELAY_UNIT_NSECS:
+ /* Nothing to do here */
break;
case SPI_DELAY_UNIT_SCK:
/* clock cycles need to be obtained from spi_transfer */
if (!xfer)
return -EINVAL;
- /* if there is no effective speed know, then approximate
- * by underestimating with half the requested hz
+ /*
+ * If the effective speed is unknown, approximate it
+ * by underestimating with half of the requested Hz.
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
if (!hz)
return -EINVAL;
- delay *= DIV_ROUND_UP(1000000000, hz);
+
+ /* Convert delay to nanoseconds */
+ delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
break;
default:
return -EINVAL;
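For SPI_DELAY_UNIT_SCK the delay value is a number of clock cycles, so it is scaled by the rounded-up period of the transfer clock. With hypothetical numbers, a 4-cycle delay at an effective 10 MHz converts as:

	period_ns = DIV_ROUND_UP(1000000000, 10000000);	/* = 100 ns per cycle */
	delay_ns  = 4 * period_ns;			/* = 400 ns           */

If effective_speed_hz is zero, half of the requested speed_hz is used instead, which overestimates the period and therefore errs on the side of a longer (safe) delay.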
@@ -1207,6 +1277,7 @@ EXPORT_SYMBOL_GPL(spi_delay_exec);
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
struct spi_transfer *xfer)
{
+ u32 default_delay_ns = 10 * NSEC_PER_USEC;
u32 delay = xfer->cs_change_delay.value;
u32 unit = xfer->cs_change_delay.unit;
int ret;
@@ -1214,16 +1285,16 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg,
/* return early on "fast" mode - for everything but USECS */
if (!delay) {
if (unit == SPI_DELAY_UNIT_USECS)
- _spi_transfer_delay_ns(10000);
+ _spi_transfer_delay_ns(default_delay_ns);
return;
}
ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
if (ret) {
dev_err_once(&msg->spi->dev,
- "Use of unsupported delay unit %i, using default of 10us\n",
- unit);
- _spi_transfer_delay_ns(10000);
+ "Use of unsupported delay unit %i, using default of %luus\n",
+ unit, default_delay_ns / NSEC_PER_USEC);
+ _spi_transfer_delay_ns(default_delay_ns);
}
}
@@ -2037,6 +2108,7 @@ of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
+ spi->dev.fwnode = of_fwnode_handle(nc);
/* Register the new device */
rc = spi_add_device(spi);
@@ -2084,6 +2156,55 @@ static void of_register_spi_devices(struct spi_controller *ctlr)
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
+/**
+ * spi_new_ancillary_device() - Register ancillary SPI device
+ * @spi: Pointer to the main SPI device registering the ancillary device
+ * @chip_select: Chip Select of the ancillary device
+ *
+ * Register an ancillary SPI device; for example some chips have a chip-select
+ * for normal device usage and another one for setup/firmware upload.
+ *
+ * This may only be called from the main SPI device's probe routine.
+ *
+ * Return: pointer to the new ancillary SPI device on success; ERR_PTR() on failure
+ */
+struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
+ u8 chip_select)
+{
+ struct spi_device *ancillary;
+ int rc = 0;
+
+ /* Alloc an spi_device */
+ ancillary = spi_alloc_device(spi->controller);
+ if (!ancillary) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+
+ /* Use provided chip-select for ancillary device */
+ ancillary->chip_select = chip_select;
+
+ /* Take over SPI mode/speed from SPI main device */
+ ancillary->max_speed_hz = spi->max_speed_hz;
+ ancillary->mode = spi->mode;
+
+ /* Register the new device */
+ rc = spi_add_device_locked(ancillary);
+ if (rc) {
+ dev_err(&spi->dev, "failed to register ancillary device\n");
+ goto err_out;
+ }
+
+ return ancillary;
+
+err_out:
+ spi_dev_put(ancillary);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
+
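A hedged sketch of how a main device's probe() might use the new helper to claim a second chip select; the chip-select number, device name and error handling here are illustrative only:

static int mychip_probe(struct spi_device *spi)
{
	struct spi_device *setup_dev;

	/* Hypothetical second chip select used only for firmware upload */
	setup_dev = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(setup_dev))
		return PTR_ERR(setup_dev);

	/* ... talk to setup_dev with the regular spi_sync() API ... */
	return 0;
}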
#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
struct spi_controller *ctlr;
@@ -2601,9 +2722,10 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
native_cs_mask |= BIT(i);
}
- ctlr->unused_native_cs = ffz(native_cs_mask);
- if (num_cs_gpios && ctlr->max_native_cs &&
- ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
+
+ if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
+ ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
dev_err(dev, "No unused native chip select available\n");
return -EINVAL;
}
@@ -3420,8 +3542,10 @@ int spi_setup(struct spi_device *spi)
spi_set_thread_rt(spi->controller);
}
- dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
- (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+ trace_spi_setup(spi, status);
+
+ dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
+ spi->mode & SPI_MODE_X_MASK,
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
(spi->mode & SPI_3WIRE) ? "3wire, " : "",
@@ -3433,74 +3557,6 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
-/**
- * spi_set_cs_timing - configure CS setup, hold, and inactive delays
- * @spi: the device that requires specific CS timing configuration
- * @setup: CS setup time specified via @spi_delay
- * @hold: CS hold time specified via @spi_delay
- * @inactive: CS inactive delay between transfers specified via @spi_delay
- *
- * Return: zero on success, else a negative error code.
- */
-int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
- struct spi_delay *hold, struct spi_delay *inactive)
-{
- struct device *parent = spi->controller->dev.parent;
- size_t len;
- int status;
-
- if (spi->controller->set_cs_timing &&
- !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
- if (spi->controller->auto_runtime_pm) {
- status = pm_runtime_get_sync(parent);
- if (status < 0) {
- pm_runtime_put_noidle(parent);
- dev_err(&spi->controller->dev, "Failed to power device: %d\n",
- status);
- return status;
- }
-
- status = spi->controller->set_cs_timing(spi, setup,
- hold, inactive);
- pm_runtime_mark_last_busy(parent);
- pm_runtime_put_autosuspend(parent);
- return status;
- } else {
- return spi->controller->set_cs_timing(spi, setup, hold,
- inactive);
- }
- }
-
- if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
- (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
- (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
- dev_err(&spi->dev,
- "Clock-cycle delays for CS not supported in SW mode\n");
- return -ENOTSUPP;
- }
-
- len = sizeof(struct spi_delay);
-
- /* copy delays to controller */
- if (setup)
- memcpy(&spi->controller->cs_setup, setup, len);
- else
- memset(&spi->controller->cs_setup, 0, len);
-
- if (hold)
- memcpy(&spi->controller->cs_hold, hold, len);
- else
- memset(&spi->controller->cs_hold, 0, len);
-
- if (inactive)
- memcpy(&spi->controller->cs_inactive, inactive, len);
- else
- memset(&spi->controller->cs_inactive, 0, len);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(spi_set_cs_timing);
-
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
struct spi_device *spi)
{
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f56e0e975a46..24e9469ea35b 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -59,7 +59,7 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*
* REVISIT should changing those flags be privileged?
*/
-#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
+#define SPI_MODE_MASK (SPI_MODE_X_MASK | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 741147a4f0fe..ecc5c9da9027 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2064,7 +2064,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
struct nbu2ss_ep *ep,
int status)
{
- struct nbu2ss_req *req;
+ struct nbu2ss_req *req, *n;
/* Endpoint Disable */
_nbu2ss_epn_exit(udc, ep);
@@ -2076,7 +2076,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
return 0;
/* called with irqs blocked */
- list_for_each_entry(req, &ep->queue, queue) {
+ list_for_each_entry_safe(req, n, &ep->queue, queue) {
_nbu2ss_ep_done(ep, req, status);
}
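list_for_each_entry_safe() is needed here because _nbu2ss_ep_done() presumably unlinks the request from ep->queue while the list is being walked; the _safe variant caches the next node before the loop body runs. A generic sketch of the idiom with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static void drain(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		/* Safe: tmp already points at the next entry */
		list_del(&it->node);
		kfree(it);
	}
}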
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index dfd71e99e872..eab534dc4bcc 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -700,7 +700,6 @@ static int ad7746_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
else
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
if (pdata) {
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 33e28ccf4d85..b5229bc6eae5 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
* GDMA4740 DMAC support
*/
@@ -914,6 +913,5 @@ static struct platform_driver gdma_dma_driver = {
};
module_platform_driver(gdma_dma_driver);
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Ralink/MTK DMA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index c1dac6eec59f..437859228371 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -527,6 +527,9 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &(padapter->securitypriv);
struct sta_priv *pstapriv = &padapter->stapriv;
+ char *grpkey = padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey;
+ char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
+ char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -609,7 +612,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
if (strcmp(param->u.crypt.alg, "WEP") == 0)
{
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -622,12 +625,12 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
+ memcpy(rxkey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
@@ -636,7 +639,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
}
else
{
@@ -713,7 +716,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
if (strcmp(param->u.crypt.alg, "WEP") == 0)
{
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -725,12 +728,12 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
+ memcpy(rxkey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
@@ -739,7 +742,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
}
else
{
@@ -2088,7 +2091,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
struct net_device *ndev = padapter->pnetdev;
{
- struct station_info sinfo;
+ struct station_info sinfo = {};
u8 ie_offset;
if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
@@ -2281,7 +2284,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
mon_ndev->ieee80211_ptr = mon_wdev;
- ret = register_netdevice(mon_ndev);
+ ret = cfg80211_register_netdevice(mon_ndev);
if (ret) {
goto out;
}
@@ -2357,7 +2360,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
adapter = rtw_netdev_priv(ndev);
pwdev_priv = adapter_wdev_data(adapter);
- unregister_netdevice(ndev);
+ cfg80211_unregister_netdevice(ndev);
if (ndev == pwdev_priv->pmon_ndev) {
pwdev_priv->pmon_ndev = NULL;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index e98e5388d5c7..5088c3731b6d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2963,6 +2963,9 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &(padapter->securitypriv);
struct sta_priv *pstapriv = &padapter->stapriv;
+ char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
+ char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
+ char *grpkey = psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey;
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -3064,7 +3067,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
if (param->u.crypt.set_tx == 1) {
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -3073,11 +3076,11 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
@@ -3086,7 +3089,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
} else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
@@ -3142,7 +3145,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else { /* group key??? */
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -3150,19 +3153,19 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
+ memcpy(rxkey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
} else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index d6fdd1c61f90..a526f9678c34 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -204,11 +204,11 @@ static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
struct iblock_dev_plug *ib_dev_plug;
/*
- * Each se_device has a per cpu work this can be run from. Wwe
+ * Each se_device has a per cpu work this can be run from. We
* shouldn't have multiple threads on the same cpu calling this
* at the same time.
*/
- ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+ ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
return NULL;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 8fbfe75c5744..7e35eddd9eb7 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1416,7 +1416,7 @@ void __target_init_cmd(
cmd->orig_fe_lun = unpacked_lun;
if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
- cmd->cpuid = smp_processor_id();
+ cmd->cpuid = raw_smp_processor_id();
cmd->state_active = false;
}
@@ -3121,9 +3121,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
__releases(&cmd->t_state_lock)
__acquires(&cmd->t_state_lock)
{
-
- assert_spin_locked(&cmd->t_state_lock);
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_held(&cmd->t_state_lock);
if (fabric_stop)
cmd->transport_state |= CMD_T_FABRIC_STOP;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 198d25ae482a..4bba10e7755a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -516,8 +516,10 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
dpi = dbi * udev->data_pages_per_blk;
/* Count the number of already allocated pages */
xas_set(&xas, dpi);
+ rcu_read_lock();
for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
cnt++;
+ rcu_read_unlock();
for (i = cnt; i < page_cnt; i++) {
/* try to get new page from the mm */
@@ -699,11 +701,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
struct scatterlist *sg, unsigned int sg_nents,
struct iovec **iov, size_t data_len)
{
- XA_STATE(xas, &udev->data_pages, 0);
/* start value of dbi + 1 must not be a valid dbi */
int dbi = -2;
size_t page_remaining, cp_len;
- int page_cnt, page_inx;
+ int page_cnt, page_inx, dpi;
struct sg_mapping_iter sg_iter;
unsigned int sg_flags;
struct page *page;
@@ -726,9 +727,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
if (page_cnt > udev->data_pages_per_blk)
page_cnt = udev->data_pages_per_blk;
- xas_set(&xas, dbi * udev->data_pages_per_blk);
- for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
- page = xas_next(&xas);
+ dpi = dbi * udev->data_pages_per_blk;
+ for (page_inx = 0; page_inx < page_cnt && data_len;
+ page_inx++, dpi++) {
+ page = xa_load(&udev->data_pages, dpi);
if (direction == TCMU_DATA_AREA_TO_SG)
flush_dcache_page(page);
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index 337c8d82f74e..6d0f7062bb87 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -21,6 +21,7 @@
#define TEEC_SUCCESS 0x00000000
#define TEEC_ERROR_GENERIC 0xFFFF0000
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -93,6 +94,18 @@ struct amdtee_shm_data {
u32 buf_id;
};
+/**
+ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
+ * Processor
+ * @ta_handle: Handle to TA loaded in TEE
+ * @refcount: Reference count for the loaded TA
+ */
+struct amdtee_ta_data {
+ struct list_head list_node;
+ u32 ta_handle;
+ u32 refcount;
+};
+
#define LOWER_TWO_BYTE_MASK 0x0000FFFF
/**
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index 096dd4d92d39..07f36ac834c8 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
return ret;
}
+static DEFINE_MUTEX(ta_refcount_mutex);
+static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
+
+static u32 get_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+ /* Caller must hold a mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle)
+ return ++ta_data->refcount;
+
+ ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
+ if (ta_data) {
+ ta_data->ta_handle = ta_handle;
+ ta_data->refcount = 1;
+ count = ta_data->refcount;
+ list_add(&ta_data->list_node, &ta_list);
+ }
+
+ return count;
+}
+
+static u32 put_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+ /* Caller must hold a mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle) {
+ count = --ta_data->refcount;
+ if (count == 0) {
+ list_del(&ta_data->list_node);
+ kfree(ta_data);
+ break;
+ }
+ }
+
+ return count;
+}
+
int handle_unload_ta(u32 ta_handle)
{
struct tee_cmd_unload_ta cmd = {0};
- u32 status;
+ u32 status, count;
int ret;
if (!ta_handle)
return -EINVAL;
+ mutex_lock(&ta_refcount_mutex);
+
+ count = put_ta_refcount(ta_handle);
+
+ if (count) {
+ pr_debug("unload ta: not unloading %u count %u\n",
+ ta_handle, count);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
cmd.ta_handle = ta_handle;
ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
if (!ret && status != 0) {
pr_err("unload ta: status = 0x%x\n", status);
ret = -EBUSY;
+ } else {
+ pr_debug("unloaded ta handle %u\n", ta_handle);
}
+unlock:
+ mutex_unlock(&ta_refcount_mutex);
return ret;
}
@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
{
- struct tee_cmd_load_ta cmd = {0};
+ struct tee_cmd_unload_ta unload_cmd = {};
+ struct tee_cmd_load_ta load_cmd = {};
phys_addr_t blob;
int ret;
@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
return -EINVAL;
}
- cmd.hi_addr = upper_32_bits(blob);
- cmd.low_addr = lower_32_bits(blob);
- cmd.size = size;
+ load_cmd.hi_addr = upper_32_bits(blob);
+ load_cmd.low_addr = lower_32_bits(blob);
+ load_cmd.size = size;
- ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
- sizeof(cmd), &arg->ret);
+ mutex_lock(&ta_refcount_mutex);
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
+ sizeof(load_cmd), &arg->ret);
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
- } else {
- set_session_id(cmd.ta_handle, 0, &arg->session);
+ } else if (arg->ret == TEEC_SUCCESS) {
+ ret = get_ta_refcount(load_cmd.ta_handle);
+ if (!ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+ /* Unload the TA on error */
+ unload_cmd.ta_handle = load_cmd.ta_handle;
+ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+ (void *)&unload_cmd,
+ sizeof(unload_cmd), &ret);
+ } else {
+ set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ }
}
+ mutex_unlock(&ta_refcount_mutex);
pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
- cmd.ta_handle, arg->ret_origin, arg->ret);
+ load_cmd.ta_handle, arg->ret_origin, arg->ret);
return 0;
}
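
The call.c hunks above add per-TA reference counting under ta_refcount_mutex so a trusted application is loaded once, shared by later sessions, and only unloaded when the last reference is dropped. A minimal self-contained sketch of that load-once/unload-on-last-put pattern follows (plain C with pthreads; ta_entry, ta_get() and ta_put() are made-up names, and unlike the driver the sketch takes the lock inside the helpers rather than in the caller).

/*
 * Standalone sketch of "load once, unload on last put": a mutex-protected
 * list maps a handle to a refcount. All names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ta_entry {
	unsigned int handle;
	unsigned int refcount;
	struct ta_entry *next;
};

static pthread_mutex_t ta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ta_entry *ta_list;

/* Returns the new refcount, or 0 on allocation failure. */
static unsigned int ta_get(unsigned int handle)
{
	struct ta_entry *e;
	unsigned int count = 0;

	pthread_mutex_lock(&ta_lock);
	for (e = ta_list; e; e = e->next) {
		if (e->handle == handle) {
			count = ++e->refcount;
			goto out;
		}
	}
	e = calloc(1, sizeof(*e));
	if (e) {
		e->handle = handle;
		e->refcount = 1;
		e->next = ta_list;
		ta_list = e;
		count = 1;	/* first user: caller performs the real load */
	}
out:
	pthread_mutex_unlock(&ta_lock);
	return count;
}

/* Returns the remaining refcount; 0 means the caller should unload. */
static unsigned int ta_put(unsigned int handle)
{
	struct ta_entry **pp, *e;
	unsigned int count = 0;

	pthread_mutex_lock(&ta_lock);
	for (pp = &ta_list; (e = *pp); pp = &e->next) {
		if (e->handle == handle) {
			count = --e->refcount;
			if (!count) {
				*pp = e->next;
				free(e);
			}
			break;
		}
	}
	pthread_mutex_unlock(&ta_lock);
	return count;
}

int main(void)
{
	printf("get -> %u\n", ta_get(42));	/* 1: load now */
	printf("get -> %u\n", ta_get(42));	/* 2: reuse */
	printf("put -> %u\n", ta_put(42));	/* 1: still in use */
	printf("put -> %u\n", ta_put(42));	/* 0: unload now */
	return 0;
}
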
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 8a6a8f30bb42..da6b88e80dc0 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
continue;
handle_close_session(sess->ta_handle, sess->session_info[i]);
+ handle_unload_ta(sess->ta_handle);
}
- /* Unload Trusted Application once all sessions are closed */
- handle_unload_ta(sess->ta_handle);
kfree(sess);
}
@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
- /* Unload the TA from TEE */
- handle_unload_ta(sess->ta_handle);
mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
{
struct amdtee_context_data *ctxdata = ctx->data;
struct amdtee_session *sess = NULL;
- u32 session_info;
+ u32 session_info, ta_handle;
size_t ta_size;
int rc, i;
void *ta;
@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
if (arg->ret != TEEC_SUCCESS)
goto out;
+ ta_handle = get_ta_handle(arg->session);
+
mutex_lock(&session_list_mutex);
sess = alloc_session(ctxdata, arg->session);
mutex_unlock(&session_list_mutex);
if (!sess) {
+ handle_unload_ta(ta_handle);
rc = -ENOMEM;
goto out;
}
@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
spin_lock(&sess->lock);
clear_bit(i, sess->sess_mask);
spin_unlock(&sess->lock);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
goto out;
}
sess->session_info[i] = session_info;
- set_session_id(sess->ta_handle, i, &arg->session);
+ set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
/* Close the session */
handle_close_session(ta_handle, session_info);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 6132cc8d014c..6e6eb836e9b6 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -220,6 +220,7 @@ int optee_open_session(struct tee_context *ctx,
struct optee_msg_arg *msg_arg;
phys_addr_t msg_parg;
struct optee_session *sess = NULL;
+ uuid_t client_uuid;
/* +2 for the meta parameters added below */
shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
@@ -240,10 +241,11 @@ int optee_open_session(struct tee_context *ctx,
memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
msg_arg->params[1].u.value.c = arg->clnt_login;
- rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
- arg->clnt_login, arg->clnt_uuid);
+ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+ arg->clnt_uuid);
if (rc)
goto out;
+ export_uuid(msg_arg->params[1].u.octets, &client_uuid);
rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
if (rc)
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 81ff593ac4ec..e3d72d09c484 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
/*
- * This file defines the OP-TEE message protocol used to communicate
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
* with an instance of OP-TEE running in secure world.
*
* This file is divided into two sections.
@@ -144,9 +144,10 @@ struct optee_msg_param_value {
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
* @value: parameter by opaque value
+ * @octets: parameter by octet string
*
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
* OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
* OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
@@ -157,6 +158,7 @@ struct optee_msg_param {
struct optee_msg_param_tmem tmem;
struct optee_msg_param_rmem rmem;
struct optee_msg_param_value value;
+ u8 octets[24];
} u;
};
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index d1248ba943a4..62c0aa5d0783 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -237,6 +237,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
if (ACPI_FAILURE(status))
trip_cnt = 0;
else {
+ int i;
+
int34x_thermal_zone->aux_trips =
kcalloc(trip_cnt,
sizeof(*int34x_thermal_zone->aux_trips),
@@ -247,6 +249,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
}
trip_mask = BIT(trip_cnt) - 1;
int34x_thermal_zone->aux_trip_nr = trip_cnt;
+ for (i = 0; i < trip_cnt; ++i)
+ int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
}
trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
index f8e882592ba5..99abdc03c44c 100644
--- a/drivers/thermal/intel/therm_throt.c
+++ b/drivers/thermal/intel/therm_throt.c
@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void)
return atomic_read(&therm_throt_en);
}
+void __init therm_lvt_init(void)
+{
+ /*
+ * This function is only called on boot CPU. Save the init thermal
+ * LVT value on BSP and use that value to restore APs' thermal LVT
+ * entry BIOS programmed later
+ */
+ if (intel_thermal_supported(&boot_cpu_data))
+ lvtthmr_init = apic_read(APIC_LVTTHMR);
+}
+
void intel_init_thermal(struct cpuinfo_x86 *c)
{
unsigned int cpu = smp_processor_id();
@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
if (!intel_thermal_supported(c))
return;
- /* On the BSP? */
- if (c == &boot_cpu_data)
- lvtthmr_init = apic_read(APIC_LVTTHMR);
-
/*
* First check if its enabled already, in which case there might
* be some SMM goo which handles it, so we can't even put a handler
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 295742e83960..4d8edc61a78b 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -166,7 +166,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
if (thres_reg_value)
*temp = zonedev->tj_max - thres_reg_value * 1000;
else
- *temp = 0;
+ *temp = THERMAL_TEMP_INVALID;
pr_debug("sys_get_trip_temp %d\n", *temp);
return 0;
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index b460b56e981c..232fd0b33325 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -441,7 +441,7 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) {
dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan);
- return ret;
+ return -EINVAL;
}
channel->adc_channel = args.args[0];
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d20b25f40d19..10a2d8e1cacf 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -36,10 +36,8 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
-static DEFINE_MUTEX(poweroff_lock);
static atomic_t in_suspend;
-static bool power_off_triggered;
static struct thermal_governor *def_governor;
@@ -327,70 +325,18 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip)
def_governor->throttle(tz, trip);
}
-/**
- * thermal_emergency_poweroff_func - emergency poweroff work after a known delay
- * @work: work_struct associated with the emergency poweroff function
- *
- * This function is called in very critical situations to force
- * a kernel poweroff after a configurable timeout value.
- */
-static void thermal_emergency_poweroff_func(struct work_struct *work)
-{
- /*
- * We have reached here after the emergency thermal shutdown
- * Waiting period has expired. This means orderly_poweroff has
- * not been able to shut off the system for some reason.
- * Try to shut down the system immediately using kernel_power_off
- * if populated
- */
- WARN(1, "Attempting kernel_power_off: Temperature too high\n");
- kernel_power_off();
-
- /*
- * Worst of the worst case trigger emergency restart
- */
- WARN(1, "Attempting emergency_restart: Temperature too high\n");
- emergency_restart();
-}
-
-static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work,
- thermal_emergency_poweroff_func);
-
-/**
- * thermal_emergency_poweroff - Trigger an emergency system poweroff
- *
- * This may be called from any critical situation to trigger a system shutdown
- * after a known period of time. By default this is not scheduled.
- */
-static void thermal_emergency_poweroff(void)
+void thermal_zone_device_critical(struct thermal_zone_device *tz)
{
- int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
/*
* poweroff_delay_ms must be a carefully profiled positive value.
- * Its a must for thermal_emergency_poweroff_work to be scheduled
+ * Its a must for forced_emergency_poweroff_work to be scheduled.
*/
- if (poweroff_delay_ms <= 0)
- return;
- schedule_delayed_work(&thermal_emergency_poweroff_work,
- msecs_to_jiffies(poweroff_delay_ms));
-}
+ int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
-void thermal_zone_device_critical(struct thermal_zone_device *tz)
-{
dev_emerg(&tz->device, "%s: critical temperature reached, "
"shutting down\n", tz->type);
- mutex_lock(&poweroff_lock);
- if (!power_off_triggered) {
- /*
- * Queue a backup emergency shutdown in the event of
- * orderly_poweroff failure
- */
- thermal_emergency_poweroff();
- orderly_poweroff(true);
- power_off_triggered = true;
- }
- mutex_unlock(&poweroff_lock);
+ hw_protection_shutdown("Temperature too high", poweroff_delay_ms);
}
EXPORT_SYMBOL(thermal_zone_device_critical);
@@ -1538,7 +1484,6 @@ error:
ida_destroy(&thermal_cdev_ida);
mutex_destroy(&thermal_list_lock);
mutex_destroy(&thermal_governor_lock);
- mutex_destroy(&poweroff_lock);
return result;
}
postcore_initcall(thermal_init);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index ebe7cb70bfb6..ea0603b59309 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -770,7 +770,7 @@ static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
}
/**
- * ti_bandgap_alert_init() - setup and initialize talert handling
+ * ti_bandgap_talert_init() - setup and initialize talert handling
* @bgp: pointer to struct ti_bandgap
* @pdev: pointer to device struct platform_device
*
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index 7288aaf01ae6..5631319f7b20 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
{
unsigned int retries = DMA_PORT_RETRIES;
- unsigned int offset;
-
- offset = address & 3;
- address = address & ~3;
do {
- u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+ unsigned int offset;
+ size_t nbytes;
int ret;
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
+
ret = dma_port_flash_read_block(dma, address, dma->buf,
ALIGN(nbytes, 4));
if (ret) {
@@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
return ret;
}
+ nbytes -= offset;
memcpy(buf, dma->buf + offset, nbytes);
size -= nbytes;
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 680bc738dd66..671d72af8ba1 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
unsigned int retries = USB4_DATA_RETRIES;
unsigned int offset;
- offset = address & 3;
- address = address & ~3;
-
do {
- size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
unsigned int dwaddress, dwords;
u8 data[USB4_DATA_DWORDS * 4];
+ size_t nbytes;
int ret;
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
+
dwaddress = address / 4;
dwords = ALIGN(nbytes, 4) / 4;
@@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
return ret;
}
+ nbytes -= offset;
memcpy(buf, data + offset, nbytes);
size -= nbytes;
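
Both hunks above (dma_port.c and usb4.c) recompute the offset of the requested address within a dword on each pass, clamp size + offset to the hardware block size, read a dword-aligned amount, and then skip the leading offset bytes before copying to the caller. A rough standalone model of that arithmetic over an in-memory array follows; BLOCK, flash, read_block() and read_unaligned() are invented for illustration, not the Thunderbolt code.

/*
 * Standalone model of the aligned-block read loop: the backend only
 * accepts 4-byte-aligned reads of at most BLOCK bytes, so an unaligned
 * request reads from the aligned base and drops 'offset' leading bytes.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define BLOCK 16	/* stand-in for the per-transfer dword buffer */

static unsigned char flash[256];

/* Backend that only accepts a 4-byte-aligned address and length. */
static void read_block(unsigned int addr, void *buf, unsigned int len)
{
	assert(addr % 4 == 0 && len % 4 == 0 && len <= BLOCK);
	memcpy(buf, flash + addr, len);
}

static void read_unaligned(unsigned int address, void *buf, size_t size)
{
	unsigned char tmp[BLOCK];

	while (size) {
		unsigned int offset = address & 3;
		size_t nbytes = size + offset;

		if (nbytes > BLOCK)
			nbytes = BLOCK;
		/* Round up to whole dwords, reading from the aligned base. */
		read_block(address & ~3u, tmp, (nbytes + 3) & ~(size_t)3);

		nbytes -= offset;		/* drop the leading pad bytes */
		memcpy(buf, tmp + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf = (unsigned char *)buf + nbytes;
	}
}

int main(void)
{
	unsigned char out[10];

	for (unsigned int i = 0; i < sizeof(flash); i++)
		flash[i] = (unsigned char)i;

	read_unaligned(3, out, sizeof(out));	/* unaligned start address */
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%u ", out[i]);
	printf("\n");	/* expect 3 4 5 ... 12 */
	return 0;
}
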
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 52bb21205bb6..6473361525d1 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -7,6 +7,7 @@
* Copyright (C) 2001 Russell King.
*/
+#include <linux/bits.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/dmaengine.h>
@@ -70,24 +71,25 @@ struct serial8250_config {
unsigned int flags;
};
-#define UART_CAP_FIFO (1 << 8) /* UART has FIFO */
-#define UART_CAP_EFR (1 << 9) /* UART has EFR */
-#define UART_CAP_SLEEP (1 << 10) /* UART has IER sleep */
-#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
-#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
-#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
-#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
-#define UART_CAP_RPM (1 << 15) /* Runtime PM is active while idle */
-#define UART_CAP_IRDA (1 << 16) /* UART supports IrDA line discipline */
-#define UART_CAP_MINI (1 << 17) /* Mini UART on BCM283X family lacks:
+#define UART_CAP_FIFO BIT(8) /* UART has FIFO */
+#define UART_CAP_EFR BIT(9) /* UART has EFR */
+#define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */
+#define UART_CAP_AFE BIT(11) /* MCR-based hw flow control */
+#define UART_CAP_UUE BIT(12) /* UART needs IER bit 6 set (Xscale) */
+#define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */
+#define UART_CAP_RPM BIT(15) /* Runtime PM is active while idle */
+#define UART_CAP_IRDA BIT(16) /* UART supports IrDA line discipline */
+#define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks:
* STOP PARITY EPAR SPAR WLEN5 WLEN6
*/
-#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
-#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
-#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
-#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
-#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */
+#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */
+#define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */
+#define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */
+#define UART_BUG_PARITY BIT(4) /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */
#ifdef CONFIG_SERIAL_8250_SHARE_IRQ
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 61550f24a2d3..d035d08cb987 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -437,6 +437,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.status = UPSTAT_SYNC_FIFO;
port.port.dev = &pdev->dev;
port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ port.bugs |= UART_BUG_TXRACE;
rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
if (rc < 0)
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 9e204f9b799a..a3a0154da567 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -714,6 +714,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "APMC0D08", 0},
{ "AMD0020", 0 },
{ "AMDI0020", 0 },
+ { "AMDI0022", 0 },
{ "BRCM2032", 0 },
{ "HISI0031", 0 },
{ },
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 2f49c580139b..bd4e9f6ac29c 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -553,7 +553,11 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
{
struct exar8250 *priv = pci_get_drvdata(pcidev);
struct uart_8250_port *port = serial8250_get_port(priv->line[0]);
- struct platform_device *pdev = port->port.private_data;
+ struct platform_device *pdev;
+
+ pdev = port->port.private_data;
+ if (!pdev)
+ return;
device_remove_software_node(&pdev->dev);
platform_device_unregister(pdev);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 689d8227f95f..780cc99732b6 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -56,6 +56,8 @@ struct serial_private {
int line[];
};
+#define PCI_DEVICE_ID_HPE_PCI_SERIAL 0x37e
+
static const struct pci_device_id pci_use_msi[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
0xA000, 0x1000) },
@@ -63,6 +65,8 @@ static const struct pci_device_id pci_use_msi[] = {
0xA000, 0x1000) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
0xA000, 0x1000) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+ PCI_ANY_ID, PCI_ANY_ID) },
{ }
};
@@ -1998,6 +2002,16 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_hp_diva_setup,
},
/*
+ * HPE PCI serial device
+ */
+ {
+ .vendor = PCI_VENDOR_ID_HP_3PAR,
+ .device = PCI_DEVICE_ID_HPE_PCI_SERIAL,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_hp_diva_setup,
+ },
+ /*
* Intel
*/
{
@@ -3944,21 +3958,26 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
uart.port.uartclk = board->base_baud * 16;
- if (pci_match_id(pci_use_msi, dev)) {
- dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
- pci_set_master(dev);
- rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (board->flags & FL_NOIRQ) {
+ uart.port.irq = 0;
} else {
- dev_dbg(&dev->dev, "Using legacy interrupts\n");
- rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
- }
- if (rc < 0) {
- kfree(priv);
- priv = ERR_PTR(rc);
- goto err_deinit;
+ if (pci_match_id(pci_use_msi, dev)) {
+ dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+ pci_set_master(dev);
+ rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ } else {
+ dev_dbg(&dev->dev, "Using legacy interrupts\n");
+ rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ }
+ if (rc < 0) {
+ kfree(priv);
+ priv = ERR_PTR(rc);
+ goto err_deinit;
+ }
+
+ uart.port.irq = pci_irq_vector(dev, 0);
}
- uart.port.irq = pci_irq_vector(dev, 0);
uart.port.dev = &dev->dev;
for (i = 0; i < nr_ports; i++) {
@@ -4973,6 +4992,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b2_1_115200 },
+ /* HPE PCI serial device */
+ { PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b1_1_115200 },
{ PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index d45dab1ab316..fc5ab2032282 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1809,6 +1809,18 @@ void serial8250_tx_chars(struct uart_8250_port *up)
count = up->tx_loadsz;
do {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ if (up->bugs & UART_BUG_TXRACE) {
+ /*
+ * The Aspeed BMC virtual UARTs have a bug where data
+ * may get stuck in the BMC's Tx FIFO from bursts of
+ * writes on the APB interface.
+ *
+ * Delay back-to-back writes by a read cycle to avoid
+ * stalling the VUART. Read a register that won't have
+ * side-effects and discard the result.
+ */
+ serial_in(up, UART_SCR);
+ }
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8534d6e45a1d..3cbc757d7be7 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1519,6 +1519,8 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
+ if (ret)
+ uart_unregister_driver(&max310x_uart);
#endif
return ret;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index e0c00a1b0763..51b0ecabf2ec 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!match)
- return -ENODEV;
-
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index d60abffab70e..6689d8add8f7 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -195,7 +195,6 @@ struct rp2_card {
void __iomem *bar0;
void __iomem *bar1;
spinlock_t card_lock;
- struct completion fw_loaded;
};
#define RP_ID(prod) PCI_VDEVICE(RP, (prod))
@@ -662,17 +661,10 @@ static void rp2_remove_ports(struct rp2_card *card)
card->initialized_ports = 0;
}
-static void rp2_fw_cb(const struct firmware *fw, void *context)
+static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw)
{
- struct rp2_card *card = context;
resource_size_t phys_base;
- int i, rc = -ENOENT;
-
- if (!fw) {
- dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
- RP2_FW_NAME);
- goto no_fw;
- }
+ int i, rc = 0;
phys_base = pci_resource_start(card->pdev, 1);
@@ -718,23 +710,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context)
card->initialized_ports++;
}
- release_firmware(fw);
-no_fw:
- /*
- * rp2_fw_cb() is called from a workqueue long after rp2_probe()
- * has already returned success. So if something failed here,
- * we'll just leave the now-dormant device in place until somebody
- * unbinds it.
- */
- if (rc)
- dev_warn(&card->pdev->dev, "driver initialization failed\n");
-
- complete(&card->fw_loaded);
+ return rc;
}
static int rp2_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ const struct firmware *fw;
struct rp2_card *card;
struct rp2_uart_port *ports;
void __iomem * const *bars;
@@ -745,7 +727,6 @@ static int rp2_probe(struct pci_dev *pdev,
return -ENOMEM;
pci_set_drvdata(pdev, card);
spin_lock_init(&card->card_lock);
- init_completion(&card->fw_loaded);
rc = pcim_enable_device(pdev);
if (rc)
@@ -778,21 +759,23 @@ static int rp2_probe(struct pci_dev *pdev,
return -ENOMEM;
card->ports = ports;
- rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
- IRQF_SHARED, DRV_NAME, card);
- if (rc)
+ rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "cannot find '%s' firmware image\n",
+ RP2_FW_NAME);
return rc;
+ }
- /*
- * Only catastrophic errors (e.g. ENOMEM) are reported here.
- * If the FW image is missing, we'll find out in rp2_fw_cb()
- * and print an error message.
- */
- rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
- GFP_KERNEL, card, rp2_fw_cb);
+ rc = rp2_load_firmware(card, fw);
+
+ release_firmware(fw);
+ if (rc < 0)
+ return rc;
+
+ rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+ IRQF_SHARED, DRV_NAME, card);
if (rc)
return rc;
- dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
return 0;
}
@@ -801,7 +784,6 @@ static void rp2_remove(struct pci_dev *pdev)
{
struct rp2_card *card = pci_get_drvdata(pdev);
- wait_for_completion(&card->fw_loaded);
rp2_remove_ports(card);
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index bbae072a125d..222032792d6c 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -338,7 +338,7 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
do {
lsr = tegra_uart_read(tup, UART_LSR);
- if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
+ if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
break;
udelay(1);
} while (--tmout);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 87f7127b57e6..18ff85a83f80 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -863,9 +863,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
goto check_and_exit;
}
- retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
- if (retval && (change_irq || change_port))
- goto exit;
+ if (change_irq || change_port) {
+ retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
+ if (retval)
+ goto exit;
+ }
/*
* Ask the low level driver to verify the settings.
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ef37fdf37612..4baf1316ea72 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1023,10 +1023,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
{
unsigned int bits;
+ if (rx_trig >= port->fifosize)
+ rx_trig = port->fifosize - 1;
if (rx_trig < 1)
rx_trig = 1;
- if (rx_trig >= port->fifosize)
- rx_trig = port->fifosize;
/* HSCIF can be set to an arbitrary level. */
if (sci_getreg(port, HSRTRGR)->size) {
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 01645e87b3d5..fa1548d4f94b 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
/* Resizes the resolution of the display adapater */
int err = 0;
- if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
+ if (vc->vc_sw->con_resize)
err = vc->vc_sw->con_resize(vc, width, height, user);
return err;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 89aeaf3c1bca..0e0cd9e9e589 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
return -EFAULT;
- if (v.v_vlin)
- pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
- if (v.v_clin)
- pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
+ /* FIXME: Should check the copies properly */
+ if (!v.v_vlin)
+ v.v_vlin = vc->vc_scan_lines;
+
+ if (v.v_clin) {
+ int rows = v.v_vlin / v.v_clin;
+ if (v.v_rows != rows) {
+ if (v.v_rows) /* Parameters don't add up */
+ return -EINVAL;
+ v.v_rows = rows;
+ }
+ }
+
+ if (v.v_vcol && v.v_ccol) {
+ int cols = v.v_vcol / v.v_ccol;
+ if (v.v_cols != cols) {
+ if (v.v_cols)
+ return -EINVAL;
+ v.v_cols = cols;
+ }
+ }
+
+ if (v.v_clin > 32)
+ return -EINVAL;
- console_lock();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
- vc = vc_cons[i].d;
+ struct vc_data *vcp;
- if (vc) {
- vc->vc_resize_user = 1;
- vc_resize(vc, v.v_cols, v.v_rows);
+ if (!vc_cons[i].d)
+ continue;
+ console_lock();
+ vcp = vc_cons[i].d;
+ if (vcp) {
+ int ret;
+ int save_scan_lines = vcp->vc_scan_lines;
+ int save_cell_height = vcp->vc_cell_height;
+
+ if (v.v_vlin)
+ vcp->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vcp->vc_cell_height = v.v_clin;
+ vcp->vc_resize_user = 1;
+ ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ if (ret) {
+ vcp->vc_scan_lines = save_scan_lines;
+ vcp->vc_cell_height = save_cell_height;
+ console_unlock();
+ return ret;
+ }
}
+ console_unlock();
}
- console_unlock();
return 0;
}
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 0330ba99730e..652fe2547587 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
if (pdata->recv_buf == NULL) {
ret = -ENOMEM;
- goto fail_close;
+ goto fail_free_ring;
}
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
- if (ret)
+ if (ret) {
+ vfree(pdata->recv_buf);
goto fail_close;
+ }
/* put Global Physical Address Label in name */
snprintf(pdata->recv_name, sizeof(pdata->recv_name),
@@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
- if (ret)
+ if (ret) {
+ vfree(pdata->send_buf);
goto fail_close;
+ }
snprintf(pdata->send_name, sizeof(pdata->send_name),
"send:%u", pdata->send_gpadl);
@@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
fail_close:
hv_uio_cleanup(dev, pdata);
+fail_free_ring:
+ vmbus_free_ring(dev->channel);
return ret;
}
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index c7d681fef198..3bb0b0075467 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
}
if (pdev->irq && !pci_intx_mask_supported(pdev))
- return -ENOMEM;
+ return -ENODEV;
gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev)
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 9b1bd417cec0..5281f8d3fb3d 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2007,7 +2007,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
else
mask = BIT(priv_ep->num);
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
cdns3_set_register_bit(&regs->tdl_from_trb, mask);
cdns3_set_register_bit(&regs->tdl_beh, mask);
cdns3_set_register_bit(&regs->tdl_beh2, mask);
@@ -2046,15 +2046,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
case USB_ENDPOINT_XFER_INT:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
case USB_ENDPOINT_XFER_BULK:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
default:
@@ -3268,8 +3266,10 @@ static int __cdns3_gadget_init(struct cdns *cdns)
pm_runtime_get_sync(cdns->dev);
ret = cdns3_gadget_start(cdns);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync(cdns->dev);
return ret;
+ }
/*
* Because interrupt line can be shared with other components in
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 56707b6b0f57..c083985e387b 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -422,17 +422,17 @@ unmap:
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
struct cdnsp_device *pdev = pep->pdev;
- int ret;
+ int ret_stop = 0;
+ int ret_rem;
trace_cdnsp_request_dequeue(preq);
- if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
- ret = cdnsp_cmd_stop_ep(pdev, pep);
- if (ret)
- return ret;
- }
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
+ ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
+
+ ret_rem = cdnsp_remove_request(pdev, preq, pep);
- return cdnsp_remove_request(pdev, preq, pep);
+ return ret_rem ? ret_rem : ret_stop;
}
static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 5f0513c96c04..68972746e363 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{
struct cdnsp_device *pdev = (struct cdnsp_device *)data;
union cdnsp_trb *event_ring_deq;
+ unsigned long flags;
int counter = 0;
- spin_lock(&pdev->lock);
+ spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
cdnsp_died(pdev);
- spin_unlock(&pdev->lock);
+ spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
- spin_unlock(&pdev->lock);
+ spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index c16d900cdaee..393f216b9161 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.name = ci->platdata->name;
ci->gadget.otg_caps = otg_caps;
ci->gadget.sg_supported = 1;
+ ci->gadget.irq = ci->irq;
if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
ci->gadget.quirk_avoids_skb_reserve = 1;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 4545b23bda3f..bac0f5458cab 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
int val;
unsigned long flags;
+ /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
+ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ /* TVDMSRC_DIS */
+ msleep(20);
+
/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
- usleep_range(1000, 2000);
+ /* TVDMSRC_ON */
+ msleep(40);
/*
* Per BC 1.2, check voltage of D+:
@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
- usleep_range(1000, 2000);
+ /* TVDPSRC_ON */
+ msleep(40);
/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 508b1c3f8b73..d1e4a7379beb 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -321,12 +321,23 @@ exit:
}
-static void kill_urbs(struct wdm_device *desc)
+static void poison_urbs(struct wdm_device *desc)
{
/* the order here is essential */
- usb_kill_urb(desc->command);
- usb_kill_urb(desc->validity);
- usb_kill_urb(desc->response);
+ usb_poison_urb(desc->command);
+ usb_poison_urb(desc->validity);
+ usb_poison_urb(desc->response);
+}
+
+static void unpoison_urbs(struct wdm_device *desc)
+{
+ /*
+ * the order here is not essential
+ * it is symmetrical just to be nice
+ */
+ usb_unpoison_urb(desc->response);
+ usb_unpoison_urb(desc->validity);
+ usb_unpoison_urb(desc->command);
}
static void free_urbs(struct wdm_device *desc)
@@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
if (!desc->count) {
if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
- kill_urbs(desc);
+ poison_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
+ unpoison_urbs(desc);
} else {
/* must avoid dev_printk here as desc->intf is invalid */
pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
@@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
- kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
@@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
set_bit(WDM_SUSPENDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
/* callback submits work - order is essential */
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ unpoison_urbs(desc);
}
if (!PMSG_IS_AUTO(message)) {
mutex_unlock(&desc->wlock);
@@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
return 0;
@@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
struct wdm_device *desc = wdm_find_device(intf);
int rv;
+ unpoison_urbs(desc);
clear_bit(WDM_OVERFLOW, &desc->flags);
clear_bit(WDM_RESETTING, &desc->flags);
rv = recover_from_urb_loss(desc);
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 533236366a03..2218941d35a3 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps,
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
return ret;
- tbuf = kmalloc(len1, GFP_KERNEL);
+
+ /*
+ * len1 can be almost arbitrarily large. Don't WARN if it's
+ * too big, just fail the request.
+ */
+ tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
if (!tbuf) {
ret = -ENOMEM;
goto done;
@@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (num_sgs) {
as->urb->sg = kmalloc_array(num_sgs,
sizeof(struct scatterlist),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->sg) {
ret = -ENOMEM;
goto error;
@@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
(uurb_start - as->usbm->vm_start);
} else {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->transfer_buffer) {
ret = -ENOMEM;
goto error;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b2bc4b7c4289..df8e69e60aaf 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -41,6 +41,8 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
#define USB_PRODUCT_USB5534B 0x5534
+#define USB_VENDOR_CYPRESS 0x04b4
+#define USB_PRODUCT_CY7C65632 0x6570
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@@ -3642,9 +3644,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
* sequence.
*/
status = hub_port_status(hub, port1, &portstatus, &portchange);
-
- /* TRSMRCY = 10 msec */
- msleep(10);
}
SuspendCleared:
@@ -3659,6 +3658,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_SUSPEND);
}
+
+ /* TRSMRCY = 10 msec */
+ msleep(10);
}
if (udev->persist_enabled)
@@ -5698,6 +5700,11 @@ static const struct usb_device_id hub_id_table[] = {
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_PRODUCT,
+ .idVendor = USB_VENDOR_CYPRESS,
+ .idProduct = USB_PRODUCT_CY7C65632,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index da5ac4a4595b..ab6b815e0089 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -113,6 +113,7 @@ struct dwc2_hsotg_req;
* @debugfs: File entry for debugfs file for this endpoint.
* @dir_in: Set to true if this endpoint is of the IN direction, which
* means that it is sending data to the Host.
+ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
* @index: The index for the endpoint registers.
* @mc: Multi Count - number of transactions per microframe
* @interval: Interval for periodic endpoints, in frames or microframes.
@@ -162,6 +163,7 @@ struct dwc2_hsotg_ep {
unsigned short fifo_index;
unsigned char dir_in;
+ unsigned char map_dir;
unsigned char index;
unsigned char mc;
u16 interval;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e6bb1bdb2760..184964174dc0 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
{
struct usb_request *req = &hs_req->req;
- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
}
/*
@@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
{
int ret;
+ hs_ep->map_dir = hs_ep->dir_in;
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 3024785d84cb..520a0beef77c 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -776,7 +776,3 @@ static struct platform_driver dwc2_platform_driver = {
};
module_platform_driver(dwc2_platform_driver);
-
-MODULE_DESCRIPTION("DESIGNWARE HS OTG Platform Glue");
-MODULE_AUTHOR("Matthijs Kooijman <matthijs@stdin.nl>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index b6e53d8212cd..4ac397e43e19 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1671,8 +1671,8 @@ static int dwc3_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
- dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
+ dwc3_debugfs_exit(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
@@ -1690,11 +1690,6 @@ static int dwc3_remove(struct platform_device *pdev)
return 0;
}
-static void dwc3_shutdown(struct platform_device *pdev)
-{
- dwc3_remove(pdev);
-}
-
#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
@@ -2012,7 +2007,6 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = dwc3_remove,
- .shutdown = dwc3_shutdown,
.driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b1e875c58f20..c5d5760cdf53 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -57,7 +57,7 @@
#define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE 3
#define DWC3_DEVICE_EVENT_WAKEUP 4
#define DWC3_DEVICE_EVENT_HIBER_REQ 5
-#define DWC3_DEVICE_EVENT_EOPF 6
+#define DWC3_DEVICE_EVENT_SUSPEND 6
#define DWC3_DEVICE_EVENT_SOF 7
#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9
#define DWC3_DEVICE_EVENT_CMD_CMPL 10
@@ -460,7 +460,7 @@
#define DWC3_DEVTEN_CMDCMPLTEN BIT(10)
#define DWC3_DEVTEN_ERRTICERREN BIT(9)
#define DWC3_DEVTEN_SOFEN BIT(7)
-#define DWC3_DEVTEN_EOPFEN BIT(6)
+#define DWC3_DEVTEN_U3L2L1SUSPEN BIT(6)
#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5)
#define DWC3_DEVTEN_WKUPEVTEN BIT(4)
#define DWC3_DEVTEN_ULSTCNGEN BIT(3)
@@ -850,6 +850,7 @@ struct dwc3_trb {
* @hwparams6: GHWPARAMS6
* @hwparams7: GHWPARAMS7
* @hwparams8: GHWPARAMS8
+ * @hwparams9: GHWPARAMS9
*/
struct dwc3_hwparams {
u32 hwparams0;
@@ -1374,7 +1375,7 @@ struct dwc3_event_depevt {
* 3 - ULStChng
* 4 - WkUpEvt
* 5 - Reserved
- * 6 - EOPF
+ * 6 - Suspend (EOPF on revisions 2.10a and prior)
* 7 - SOF
* 8 - Reserved
* 9 - ErrticErr
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index db231de46bb3..d223c54115f4 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -221,8 +221,8 @@ static inline const char *dwc3_gadget_event_string(char *str, size_t size,
snprintf(str, size, "WakeUp [%s]",
dwc3_gadget_link_string(state));
break;
- case DWC3_DEVICE_EVENT_EOPF:
- snprintf(str, size, "End-Of-Frame [%s]",
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ snprintf(str, size, "Suspend [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_SOF:
@@ -353,8 +353,8 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
return "Wake-Up";
case DWC3_DEVICE_EVENT_HIBER_REQ:
return "Hibernation";
- case DWC3_DEVICE_EVENT_EOPF:
- return "End of Periodic Frame";
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ return "Suspend";
case DWC3_DEVICE_EVENT_SOF:
return "Start of Frame";
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
#ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
extern void dwc3_debugfs_init(struct dwc3 *d);
extern void dwc3_debugfs_exit(struct dwc3 *d);
#else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{ }
static inline void dwc3_debugfs_init(struct dwc3 *d)
{ }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 7146ee2ac057..5dbbe53269d3 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -886,30 +886,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
}
}
-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
- struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
struct dentry *dir;
- dir = debugfs_create_dir(dep->name, parent);
+ dir = debugfs_create_dir(dep->name, dep->dwc->root);
dwc3_debugfs_create_endpoint_files(dep, dir);
}
-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
- struct dentry *parent)
-{
- int i;
-
- for (i = 0; i < dwc->num_eps; i++) {
- struct dwc3_ep *dep = dwc->eps[i];
-
- if (!dep)
- continue;
-
- dwc3_debugfs_create_endpoint_dir(dep, parent);
- }
-}
-
void dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -940,7 +924,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
&dwc3_testmode_fops);
debugfs_create_file("link_state", 0644, root, dwc,
&dwc3_link_state_fops);
- dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index b13cfab89d53..756faa46d33a 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -165,8 +165,9 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
if (err < 0)
goto disable_rpm;
- dwc3_np = of_get_child_by_name(node, "dwc3");
+ dwc3_np = of_get_compatible_child(node, "snps,dwc3");
if (!dwc3_np) {
+ err = -ENODEV;
dev_err(dev, "failed to find dwc3 core child\n");
goto disable_rpm;
}
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index bdf1f98dfad8..ffe301d6ea35 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
return PTR_ERR(priv->usb_glue_regmap);
/* Create a regmap for each USB2 PHY control register set */
- for (i = 0; i < priv->usb2_ports; i++) {
+ for (i = 0; i < priv->drvdata->num_phys; i++) {
struct regmap_config u2p_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
.max_register = U2P_R1,
};
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
"u2p-%d", i);
if (!u2p_regmap_config.name)
@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->usb_init(priv);
if (ret)
- goto err_disable_clks;
+ goto err_disable_regulator;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
- goto err_disable_clks;
+ goto err_disable_regulator;
}
/* Set PHY Power */
@@ -816,6 +819,10 @@ err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
+err_disable_regulator:
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 3db17806e92e..e196673f5c64 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
if (extcon_get_state(edev, EXTCON_USB) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
omap->edev = edev;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index e7b932dcbf82..1e51460938b8 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -123,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 8b668ef46f7f..3cd294264372 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
epnum |= 1;
dep = dwc->eps[epnum];
+ if (dep == NULL)
+ return NULL;
+
if (dep->flags & DWC3_EP_ENABLED)
return dep;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index dd80e5ca8c78..f14c2aa83759 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1244,6 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
req->start_sg = sg_next(s);
req->num_queued_sgs++;
+ req->num_pending_sgs--;
/*
* The number of pending SG entries may not correspond to the
@@ -1251,7 +1252,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
* don't include unused SG entries.
*/
if (length == 0) {
- req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+ req->num_pending_sgs = 0;
break;
}
@@ -1684,7 +1685,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
}
}
- return __dwc3_gadget_kick_transfer(dep);
+ __dwc3_gadget_kick_transfer(dep);
+
+ return 0;
}
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
@@ -2258,13 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
/*
- * Synchronize any pending event handling before executing the controller
- * halt routine.
+ * Synchronize and disable any further event handling while the controller
+ * is being enabled/disabled.
*/
- if (!is_on) {
- dwc3_gadget_disable_irq(dwc);
- synchronize_irq(dwc->irq_gadget);
- }
+ disable_irq(dwc->irq_gadget);
spin_lock_irqsave(&dwc->lock, flags);
@@ -2302,6 +2302,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ enable_irq(dwc->irq_gadget);
+
pm_runtime_put(dwc->dev);
return ret;
@@ -2323,6 +2325,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
if (DWC3_VER_IS_PRIOR(DWC3, 250A))
reg |= DWC3_DEVTEN_ULSTCNGEN;
+ /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
+ reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -2747,6 +2753,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
INIT_LIST_HEAD(&dep->started_list);
INIT_LIST_HEAD(&dep->cancelled_list);
+ dwc3_debugfs_create_endpoint_dir(dep);
+
return 0;
}
@@ -2790,6 +2798,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
list_del(&dep->endpoint.ep_list);
}
+ debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
kfree(dep);
}
}
@@ -2867,15 +2876,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
struct scatterlist *sg = req->sg;
struct scatterlist *s;
- unsigned int pending = req->num_pending_sgs;
+ unsigned int num_queued = req->num_queued_sgs;
unsigned int i;
int ret = 0;
- for_each_sg(sg, s, pending, i) {
+ for_each_sg(sg, s, num_queued, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
req->sg = sg_next(s);
- req->num_pending_sgs--;
+ req->num_queued_sgs--;
ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
trb, event, status, true);
@@ -2898,7 +2907,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
- return req->num_pending_sgs == 0;
+ return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2907,7 +2916,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
{
int ret;
- if (req->num_pending_sgs)
+ if (req->request.num_mapped_sgs)
ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
status);
else
@@ -3740,7 +3749,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
- case DWC3_DEVICE_EVENT_EOPF:
+ case DWC3_DEVICE_EVENT_SUSPEND:
/* It changed to be suspend event for version 2.30a and above */
if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
/*
@@ -4039,6 +4048,7 @@ err5:
dwc3_gadget_free_endpoints(dwc);
err4:
usb_put_gadget(dwc->gadget);
+ dwc->gadget = NULL;
err3:
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
@@ -4058,8 +4068,12 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
- usb_del_gadget_udc(dwc->gadget);
+ if (!dwc->gadget)
+ return;
+
+ usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
+ usb_put_gadget(dwc->gadget);
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
kfree(dwc->setup_buf);
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 8bb25773b61e..05507606b2b4 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
{
struct usb_gadget *g = f->config->cdev->gadget;
+ /* The super-speed-plus descriptor set falls back to the super-speed
+ * one when only the latter was provided, avoiding a NULL pointer
+ * dereference when a 5 Gbps capable gadget is used with a
+ * 10 Gbps capable config (device port + cable + host port).
+ */
+ if (!ssp)
+ ssp = ss;
+
if (fs) {
f->fs_descriptors = usb_copy_descriptors(fs);
if (!f->fs_descriptors)
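With this fallback in place, a function driver that has no dedicated super-speed-plus table can pass its super-speed table (or even NULL) for the ssp argument; the f_* updates below do the former explicitly. A hedged sketch of such a bind call, with hypothetical descriptor table names:

	#include <linux/usb/composite.h>

	/* Assumed to be defined elsewhere in the (hypothetical) function driver. */
	extern struct usb_descriptor_header *example_fs_descs[];
	extern struct usb_descriptor_header *example_hs_descs[];
	extern struct usb_descriptor_header *example_ss_descs[];

	static int example_bind(struct usb_configuration *c, struct usb_function *f)
	{
		/* Reuse the super-speed table for super-speed-plus operation. */
		return usb_assign_descriptors(f, example_fs_descs, example_hs_descs,
					      example_ss_descs, example_ss_descs);
	}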
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 7f5cf488b2b1..ffe2486fce71 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ecm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
- ecm_ss_function, NULL);
+ ecm_ss_function, ecm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index cfcc4e81fb77..2cd9942707b4 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
- eem_ss_function, NULL);
+ eem_ss_function, eem_ss_function);
if (status)
goto fail;
@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "unable to unframe EEM packet\n");
- continue;
+ goto next;
}
skb_trim(skb2, len - ETH_FCS_LEN);
@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
GFP_ATOMIC);
if (unlikely(!skb3)) {
dev_kfree_skb_any(skb2);
- continue;
+ goto next;
}
dev_kfree_skb_any(skb2);
skb_queue_tail(list, skb3);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index bf109191659a..d4844afeaffc 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
ffs->func = NULL;
}
+ /* Drain any pending AIO completions */
+ drain_workqueue(ffs->io_completion_wq);
+
if (!--opts->refcnt)
functionfs_unbind(ffs);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 1125f4715830..e55699308117 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_fs_out_ep_desc.bEndpointAddress;
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors,
+ hidg_ss_descriptors);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index b56ad7c3838b..ae41f556eb75 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -207,7 +207,7 @@ autoconf_fail:
ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
- ss_loopback_descs, NULL);
+ ss_loopback_descs, ss_loopback_descs);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 019bea8e09cc..855127249f24 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
data[1] = data[0];
- DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+ DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
ncm->notify_state = NCM_NOTIFY_CONNECT;
break;
}
@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
ncm->ndp_dgram_count = 1;
/* Note: we skip opts->next_ndp_index */
- }
- /* Delay the timer. */
- hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL_SOFT);
+ /* Start the timer. */
+ hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+ HRTIMER_MODE_REL_SOFT);
+ }
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index f47fdc1fa7f1..59d382fe1bbf 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1101,7 +1101,8 @@ autoconf_fail:
ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
- hs_printer_function, ss_printer_function, NULL);
+ hs_printer_function, ss_printer_function,
+ ss_printer_function);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 0739b05a0ef7..ee95e8f5f9d4 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
- eth_ss_function, NULL);
+ eth_ss_function, eth_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index e62713846350..1ed8ff0ac2d3 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
- gser_ss_function, NULL);
+ gser_ss_function, gser_ss_function);
if (status)
goto fail;
dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 5a201ba7b155..1abf08e5164a 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -431,7 +431,8 @@ no_iso:
ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_source_sink_descs,
- hs_source_sink_descs, ss_source_sink_descs, NULL);
+ hs_source_sink_descs, ss_source_sink_descs,
+ ss_source_sink_descs);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 4d945254905d..51c1cae162d9 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
fs_subset_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
- ss_eth_function, NULL);
+ ss_eth_function, ss_eth_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7acb507946e6..de161ee0b1f9 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2057,7 +2057,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
- uasp_hs_function_desc, uasp_ss_function_desc, NULL);
+ uasp_hs_function_desc, uasp_ss_function_desc,
+ uasp_ss_function_desc);
if (ret)
goto ep_fail;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 0c418ce50ba0..f1b35a39d1ba 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
struct renesas_usb3_request *usb3_req)
{
struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
- struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
+ struct renesas_usb3_request *usb3_req_first;
unsigned long flags;
int ret = -EAGAIN;
u32 enable_bits = 0;
@@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
spin_lock_irqsave(&usb3->lock, flags);
if (usb3_ep->halt || usb3_ep->started)
goto out;
- if (usb3_req != usb3_req_first)
+ usb3_req_first = __usb3_get_request(usb3_ep);
+ if (!usb3_req_first || usb3_req != usb3_req_first)
goto out;
if (usb3_pn_change(usb3, usb3_ep->num) < 0)
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 6cac642520fc..9c2eda0918e1 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct resource *res;
int irq;
- int retval = -ENODEV;
+ int retval;
struct fotg210_hcd *fotg210;
if (usb_disabled())
@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
if (!hcd) {
- dev_err(dev, "failed to create hcd with err %d\n", retval);
+ dev_err(dev, "failed to create hcd\n");
retval = -ENOMEM;
goto fail_create_hcd;
}
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index fa59b242cd51..e8af0a125f84 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -7,8 +7,9 @@
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
-/* Up to 16 ms to halt an HC */
-#define XHCI_MAX_HALT_USEC (16*1000)
+
+/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
+#define XHCI_MAX_HALT_USEC (32 * 1000)
/* HC not running - set to 1 when run/stop bit is cleared. */
#define XHCI_STS_HALT (1<<0)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5bbccc9a0179..18c2bbddf080 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -57,7 +57,9 @@
#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
@@ -166,8 +168,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
- if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
xhci->quirks |= XHCI_DISABLE_SPARSE;
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -179,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+ xhci->quirks |= XHCI_BROKEN_D3COLD;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
@@ -243,7 +251,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -535,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* Systems with the TI redriver that loses port status change events
* need to have the registers polled during D3, so avoid D3cold.
*/
- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
pci_d3cold_disable(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 05c38dd3ee36..6acd2329e08d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -828,14 +828,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
cancelled_td_list) {
- /*
- * Doesn't matter what we pass for status, since the core will
- * just overwrite it (because the URB has been unlinked).
- */
ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
if (td->cancel_status == TD_CLEARED)
- xhci_td_cleanup(ep->xhci, td, ring, 0);
+ xhci_td_cleanup(ep->xhci, td, ring, td->status);
if (ep->xhci->xhc_state & XHCI_STATE_DYING)
return;
@@ -862,7 +858,7 @@ done:
return ret;
}
-static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep, unsigned int stream_id,
struct xhci_td *td,
enum xhci_ep_reset_type reset_type)
@@ -875,7 +871,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
* Device will be reset soon to recover the link so don't do anything
*/
if (ep->vdev->flags & VDEV_PORT_ERROR)
- return;
+ return -ENODEV;
/* add td to cancelled list and let reset ep handler take care of it */
if (reset_type == EP_HARD_RESET) {
@@ -888,16 +884,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
if (ep->ep_state & EP_HALTED) {
xhci_dbg(xhci, "Reset ep command already pending\n");
- return;
+ return 0;
}
err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
if (err)
- return;
+ return err;
ep->ep_state |= EP_HALTED;
xhci_ring_cmd_db(xhci);
+
+ return 0;
}
/*
@@ -935,14 +933,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
continue;
}
/*
- * If ring stopped on the TD we need to cancel, then we have to
+ * If a ring stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
+ * Rings halted due to STALL may show hw_deq is past the stalled
+ * TD, but still require a set TR Deq command to flush xHC cache.
*/
hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
td->urb->stream_id);
hw_deq &= ~0xf;
- if (trb_in_td(xhci, td->start_seg, td->first_trb,
+ if (td->cancel_status == TD_HALTED) {
+ cached_td = td;
+ } else if (trb_in_td(xhci, td->start_seg, td->first_trb,
td->last_trb, hw_deq, false)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
@@ -1014,6 +1016,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_td *td = NULL;
enum xhci_ep_reset_type reset_type;
struct xhci_command *command;
+ int err;
if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
if (!xhci->devs[slot_id])
@@ -1058,7 +1061,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
td->status = -EPROTO;
}
/* reset ep, reset handler cleans up cancelled tds */
- xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
+ err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
+ reset_type);
+ if (err)
+ break;
xhci_stop_watchdog_timer_in_irq(xhci, ep);
return;
case EP_STATE_RUNNING:
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ca9385d22f68..27283654ca08 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1514,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, struct urb *urb)
+ unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -1545,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
* changes max packet sizes.
*/
- command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, mem_flags);
if (!command)
return -ENOMEM;
@@ -1639,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
*/
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
- ep_index, urb);
+ ep_index, urb, mem_flags);
if (ret < 0) {
xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2595a8f057c4..e417f5ce13d1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1892,6 +1892,7 @@ struct xhci_hcd {
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
+#define XHCI_BROKEN_D3COLD BIT_ULL(41)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
index b3cfe8666ea7..336653091e3b 100644
--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
+++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
return -EINVAL;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
pdata = devm_kzalloc(&pdev->dev,
sizeof(*pdata) +
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index a3dfc77578ea..26baba3ab7d7 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
/* Set speed */
retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
0x01, /* vendor request: set speed */
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
tv->speed, /* speed value */
- 0, NULL, 0, USB_CTRL_GET_TIMEOUT);
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval) {
tv->speed = old;
dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index b5d661644263..748139d26263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
+ usb_put_dev(usbdev);
return 0;
probe_abort:
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index eebeadd26946..6b92d037d8fc 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(glue->xceiv)) {
- dev_err(dev, "fail to getting usb-phy %d\n", ret);
ret = PTR_ERR(glue->xceiv);
+ dev_err(dev, "fail to getting usb-phy %d\n", ret);
goto err_unregister_usb_phy;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 8f09a387b773..4c8f0112481f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
musb->quirk_retries--;
- break;
}
- fallthrough;
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ee595d1bea0a..fcb812bc832c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -252,9 +252,11 @@ struct cp210x_serial_private {
u8 gpio_input;
#endif
u8 partnum;
+ u32 fw_version;
speed_t min_speed;
speed_t max_speed;
bool use_actual_rate;
+ bool no_flow_control;
};
enum cp210x_event_state {
@@ -398,6 +400,7 @@ struct cp210x_special_chars {
/* CP210X_VENDOR_SPECIFIC values */
#define CP210X_READ_2NCONFIG 0x000E
+#define CP210X_GET_FW_VER_2N 0x0010
#define CP210X_READ_LATCH 0x00C2
#define CP210X_GET_PARTNUM 0x370B
#define CP210X_GET_PORTCONFIG 0x370C
@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
#define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX 587
#define CP210X_2NCONFIG_GPIO_CONTROL_IDX 600
+/* CP2102N QFN20 port configuration values */
+#define CP2102N_QFN20_GPIO2_TXLED_MODE BIT(2)
+#define CP2102N_QFN20_GPIO3_RXLED_MODE BIT(3)
+#define CP2102N_QFN20_GPIO1_RS485_MODE BIT(4)
+#define CP2102N_QFN20_GPIO0_CLK_MODE BIT(6)
+
/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
struct cp210x_gpio_write {
u8 mask;
@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
static void cp210x_set_flow_control(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
struct cp210x_special_chars chars;
struct cp210x_flow_ctl flow_ctl;
@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
u32 ctl_hs;
int ret;
+ /*
+ * Some CP2102N devices interpret ulXonLimit as ulFlowReplace (erratum
+ * CP2102N_E104). Report back that flow control is not supported.
+ */
+ if (priv->no_flow_control) {
+ tty->termios.c_cflag &= ~CRTSCTS;
+ tty->termios.c_iflag &= ~(IXON | IXOFF);
+ }
+
if (old_termios &&
C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
I_IXON(tty) == (old_termios->c_iflag & IXON) &&
@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
port_priv->crtscts = false;
}
- if (I_IXOFF(tty))
+ if (I_IXOFF(tty)) {
flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
- else
+
+ flow_ctl.ulXonLimit = cpu_to_le32(128);
+ flow_ctl.ulXoffLimit = cpu_to_le32(128);
+ } else {
flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+ }
if (I_IXON(tty))
flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
else
flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
- flow_ctl.ulXonLimit = cpu_to_le32(128);
- flow_ctl.ulXoffLimit = cpu_to_le32(128);
-
dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
ctl_hs, flow_repl);
@@ -1733,7 +1753,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
/* 0 indicates GPIO mode, 1 is alternate function */
- priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
+ /* QFN20 is special... */
+ if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE) /* GPIO 0 */
+ priv->gpio_altfunc |= BIT(0);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
+ priv->gpio_altfunc |= BIT(1);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
+ priv->gpio_altfunc |= BIT(2);
+ if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
+ priv->gpio_altfunc |= BIT(3);
+ } else {
+ priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+ }
if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
/*
@@ -1908,6 +1940,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
priv->use_actual_rate = use_actual_rate;
}
+static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ u8 ver[3];
+ int ret;
+
+ ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
+ ver, sizeof(ver));
+ if (ret)
+ return ret;
+
+ dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
+ ver[0], ver[1], ver[2]);
+
+ priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
+
+ return 0;
+}
+
+static void cp210x_determine_quirks(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ int ret;
+
+ switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2102N_QFN28:
+ case CP210X_PARTNUM_CP2102N_QFN24:
+ case CP210X_PARTNUM_CP2102N_QFN20:
+ ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
+ if (ret)
+ break;
+ if (priv->fw_version <= 0x10004)
+ priv->no_flow_control = true;
+ break;
+ default:
+ break;
+ }
+}
+
static int cp210x_attach(struct usb_serial *serial)
{
int result;
@@ -1928,6 +1999,7 @@ static int cp210x_attach(struct usb_serial *serial)
usb_set_serial_data(serial, priv);
+ cp210x_determine_quirks(serial);
cp210x_init_max_speed(serial);
result = cp210x_gpio_init(serial);
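Here the three bytes returned by CP210X_GET_FW_VER_2N are packed into fw_version, so the comparison against 0x10004 above matches firmware 1.0.4 and older. A small sketch of that encoding, with illustrative names:

	#include <linux/types.h>

	/* Pack a major.minor.patch triple the same way cp210x_get_fw_version() does. */
	static u32 example_pack_fw_version(const u8 ver[3])
	{
		return ver[0] << 16 | ver[1] << 8 | ver[2];
	}

	/* 1.0.4 packs to 0x10004, so this flags firmware 1.0.4 and older. */
	static bool example_needs_flow_control_quirk(const u8 ver[3])
	{
		return example_pack_fw_version(ver) <= 0x10004;
	}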
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6f2659e59b2e..4a1f3a95d017 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
@@ -1034,6 +1035,9 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* IDS GmbH devices */
+ { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
+ { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 3d47c6d72256..add602bebd82 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -581,6 +581,7 @@
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
+#define FTDI_NT_ORIONMX_PID 0x7c93 /* OrionMX */
/*
* Synapse Wireless product ids (FTDI_VID)
@@ -1568,6 +1569,13 @@
#define UNJO_ISODEBUG_V1_PID 0x150D
/*
+ * IDS GmbH
+ */
+#define IDS_VID 0x2CAF
+#define IDS_SI31A_PID 0x13A2
+#define IDS_CM31A_PID 0x13A3
+
+/*
* U-Blox products (http://www.u-blox.com).
*/
#define UBLOX_VID 0x1546
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 83c62f920c50..41f1b872d277 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * USB ZyXEL omni.net LCD PLUS driver
+ * USB ZyXEL omni.net driver
*
* Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
*
@@ -22,10 +22,11 @@
#include <linux/usb/serial.h>
#define DRIVER_AUTHOR "Alessandro Zummo"
-#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
+#define DRIVER_DESC "USB ZyXEL omni.net Driver"
#define ZYXEL_VENDOR_ID 0x0586
#define ZYXEL_OMNINET_ID 0x1000
+#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
/* This one seems to be a re-branded ZyXEL device */
#define BT_IGNITIONPRO_ID 0x2000
@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+ { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
};
@@ -50,7 +52,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
.owner = THIS_MODULE,
.name = "omninet",
},
- .description = "ZyXEL - omni.net lcd plus usb",
+ .description = "ZyXEL - omni.net usb",
.id_table = id_table,
.num_bulk_out = 2,
.calc_num_ports = omninet_calc_num_ports,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3e79a543d3e7..7608584ef4fe 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
+ .driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index fd773d252691..940050c31482 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 0f681ddbfd28..6097ee8fccb2 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -158,6 +158,7 @@
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
#define ADLINK_VENDOR_ID 0x0b63
#define ADLINK_ND6530_PRODUCT_ID 0x6530
+#define ADLINK_ND6530GC_PRODUCT_ID 0x653a
/* SMART USB Serial Adapter */
#define SMART_VENDOR_ID 0x0b8c
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 5f2e7f668e68..067690dac24c 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 1,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
/* flush the port receive buffer */
i = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ usb_sndctrlpipe(serial->dev, 0),
QT2_FLUSH_DEVICE, 0x40, 0,
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
@@ -639,7 +639,7 @@ static int qt2_attach(struct usb_serial *serial)
int status;
/* power on unit */
- status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0xc2, 0x40, 0x8000, 0, NULL, 0,
QT2_USB_TIMEOUT);
if (status < 0) {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index caa46ac23db9..310db5abea9d 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -37,6 +37,7 @@
/* Vendor and product ids */
#define TI_VENDOR_ID 0x0451
#define IBM_VENDOR_ID 0x04b3
+#define STARTECH_VENDOR_ID 0x14b0
#define TI_3410_PRODUCT_ID 0x3410
#define IBM_4543_PRODUCT_ID 0x4543
#define IBM_454B_PRODUCT_ID 0x454b
@@ -370,6 +371,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
@@ -408,6 +410,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
{ USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ } /* terminator */
};
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 9da22ae3006c..77dabd306ba8 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
bool match;
int nval;
u16 *val;
+ int ret;
int i;
/*
@@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
if (!val)
return ERR_PTR(-ENOMEM);
- nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
- if (nval < 0) {
+ ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
+ if (ret < 0) {
kfree(val);
- return ERR_PTR(nval);
+ return ERR_PTR(ret);
}
for (i = 0; i < nval; i++) {
@@ -238,7 +239,7 @@ find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 46a25b8db72e..ffa8aa12d5f1 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -582,10 +582,15 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
acpi_dev_free_resource_list(&resource_list);
if (!pmc->iom_base) {
- put_device(&adev->dev);
+ acpi_dev_put(adev);
return -ENOMEM;
}
+ if (IS_ERR(pmc->iom_base)) {
+ acpi_dev_put(adev);
+ return PTR_ERR(pmc->iom_base);
+ }
+
pmc->iom_adev = adev;
return 0;
@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
break;
ret = pmc_usb_register_port(pmc, i, fwnode);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
goto err_remove_ports;
+ }
}
platform_set_drvdata(pdev, pmc);
@@ -651,7 +658,7 @@ err_remove_ports:
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return ret;
}
@@ -667,7 +674,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index c4fdc00a3bc8..63470cf7f4cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -259,6 +259,7 @@ enum frs_typec_current {
#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
#define GET_SINK_CAP_RETRY_MS 100
+#define SEND_DISCOVER_RETRY_MS 100
struct pd_mode_data {
int svid_index; /* current SVID index */
@@ -366,6 +367,8 @@ struct tcpm_port {
struct kthread_work vdm_state_machine;
struct hrtimer enable_frs_timer;
struct kthread_work enable_frs;
+ struct hrtimer send_discover_timer;
+ struct kthread_work send_discover_work;
bool state_machine_running;
bool vdm_sm_running;
@@ -398,6 +401,8 @@ struct tcpm_port {
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
unsigned int nr_snk_pdo;
+ u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+ unsigned int nr_snk_vdo_v1;
u32 snk_vdo[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo;
@@ -1178,6 +1183,16 @@ static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int del
}
}
+static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+ if (delay_ms) {
+ hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&port->send_discover_timer);
+ kthread_queue_work(port->wq, &port->send_discover_work);
+ }
+}
+
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
unsigned int delay_ms)
{
@@ -1534,33 +1549,43 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));
- /* 6.4.4.3.1: Only respond as UFP (device) */
- if (port->data_role == TYPEC_DEVICE &&
+ svdm_version = PD_VDO_SVDM_VER(p[0]);
+ }
+
+ port->ams = DISCOVER_IDENTITY;
+ /*
+ * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+ * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+ * "wrong configuation" or "Unrecognized"
+ */
+ if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
port->nr_snk_vdo) {
- /*
- * Product Type DFP and Connector Type are not defined in SVDM
- * version 1.0 and shall be set to zero.
- */
- if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
- response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
- & ~IDH_CONN_MASK;
- else
- response[1] = port->snk_vdo[0];
- for (i = 1; i < port->nr_snk_vdo; i++)
- response[i + 1] = port->snk_vdo[i];
- rlen = port->nr_snk_vdo + 1;
+ if (svdm_version < SVDM_VER_2_0) {
+ for (i = 0; i < port->nr_snk_vdo_v1; i++)
+ response[i + 1] = port->snk_vdo_v1[i];
+ rlen = port->nr_snk_vdo_v1 + 1;
+
+ } else {
+ for (i = 0; i < port->nr_snk_vdo; i++)
+ response[i + 1] = port->snk_vdo[i];
+ rlen = port->nr_snk_vdo + 1;
+ }
}
break;
case CMD_DISCOVER_SVID:
+ port->ams = DISCOVER_SVIDS;
break;
case CMD_DISCOVER_MODES:
+ port->ams = DISCOVER_MODES;
break;
case CMD_ENTER_MODE:
+ port->ams = DFP_TO_UFP_ENTER_MODE;
break;
case CMD_EXIT_MODE:
+ port->ams = DFP_TO_UFP_EXIT_MODE;
break;
case CMD_ATTENTION:
/* Attention command does not have response */
@@ -1855,6 +1880,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
if (res == 0)
port->send_discover = false;
+ else if (res == -EAGAIN)
+ mod_send_discover_delayed_work(port,
+ SEND_DISCOVER_RETRY_MS);
break;
case CMD_DISCOVER_SVID:
res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -1880,7 +1908,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
}
if (res < 0) {
- port->vdm_sm_running = false;
+ port->vdm_state = VDM_STATE_ERR_BUSY;
return;
}
}
@@ -1896,6 +1924,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
port->vdo_data[0] = port->vdo_retry;
port->vdo_count = 1;
port->vdm_state = VDM_STATE_READY;
+ tcpm_ams_finish(port);
break;
case VDM_STATE_BUSY:
port->vdm_state = VDM_STATE_ERR_TMOUT;
@@ -1913,6 +1942,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+ tcpm_ams_finish(port);
+ } else {
tcpm_ams_finish(port);
}
break;
@@ -1961,7 +1993,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
port->vdm_state != VDM_STATE_BUSY &&
port->vdm_state != VDM_STATE_SEND_MESSAGE);
- if (port->vdm_state == VDM_STATE_ERR_TMOUT)
+ if (port->vdm_state < VDM_STATE_READY)
port->vdm_sm_running = false;
mutex_unlock(&port->lock);
@@ -2159,20 +2191,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
if (!type) {
tcpm_log(port, "Alert message received with no type");
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
return;
}
/* Just handling non-battery alerts for now */
if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = GET_STATUS_SEND;
+ tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+ } else {
+ /*
+ * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+ * SinkTxOk in time.
+ */
+ port->ams = GETTING_SOURCE_SINK_STATUS;
tcpm_set_state(port, GET_STATUS_SEND, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
}
+ } else {
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
}
}
@@ -2270,6 +2307,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
bool frs_enable;
int ret;
+ if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ }
+
switch (type) {
case PD_DATA_SOURCE_CAP:
for (i = 0; i < cnt; i++)
@@ -2390,7 +2433,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
port->nr_sink_caps = cnt;
port->sink_cap_done = true;
if (port->ams == GET_SINK_CAPABILITIES)
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ tcpm_set_state(port, ready_state(port), 0);
/* Unexpected Sink Capabilities */
else
tcpm_pd_handle_msg(port,
@@ -2400,14 +2443,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
- tcpm_handle_vdm_request(port, msg->payload, cnt);
+ if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
+ tcpm_handle_vdm_request(port, msg->payload, cnt);
+ else if (port->negotiated_rev > PD_REV20)
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);
tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
break;
case PD_DATA_ALERT:
- tcpm_handle_alert(port, msg->payload, cnt);
+ if (port->state != SRC_READY && port->state != SNK_READY)
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ else
+ tcpm_handle_alert(port, msg->payload, cnt);
break;
case PD_DATA_BATT_STATUS:
case PD_DATA_GET_COUNTRY_INFO:
@@ -2442,6 +2493,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
enum tcpm_state next_state;
+ /*
+ * Stop the VDM state machine if it is interrupted by other Messages. NOT_SUPP is
+ * allowed in a VDM AMS while waiting for VDM responses and will be handled later.
+ */
+ if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ }
+
switch (type) {
case PD_CTRL_GOOD_CRC:
case PD_CTRL_PING:
@@ -2552,6 +2613,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->sink_cap_done = true;
tcpm_set_state(port, ready_state(port), 0);
break;
+ case SRC_READY:
+ case SNK_READY:
+ if (port->vdm_state > VDM_STATE_READY) {
+ port->vdm_state = VDM_STATE_DONE;
+ if (tcpm_vdm_ams(port))
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ break;
+ }
+ fallthrough;
default:
tcpm_pd_handle_state(port,
port->pwr_role == TYPEC_SOURCE ?
@@ -2690,7 +2761,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
enum pd_ext_msg_type type = pd_header_type_le(msg->header);
unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
- if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
+ /* Stop the VDM state machine if interrupted by other Messages. */
+ if (tcpm_vdm_ams(port)) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ }
+
+ if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
tcpm_log(port, "Unchunked extended messages unsupported");
return;
@@ -2704,24 +2782,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
switch (type) {
case PD_EXT_STATUS:
- /*
- * If PPS related events raised then get PPS status to clear
- * (see USB PD 3.0 Spec, 6.5.2.4)
- */
- if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
- USB_PD_EXT_SDB_PPS_EVENTS)
- tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
- GETTING_SOURCE_SINK_STATUS, 0);
-
- else
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
- break;
case PD_EXT_PPS_STATUS:
- /*
- * For now the PPS status message is used to clear events
- * and nothing more.
- */
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+ tcpm_ams_finish(port);
+ tcpm_set_state(port, ready_state(port), 0);
+ } else {
+ /* unexpected Status or PPS_Status Message */
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ }
break;
case PD_EXT_SOURCE_CAP_EXT:
case PD_EXT_GET_BATT_CAP:
@@ -2784,7 +2854,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
"Data role mismatch, initiating error recovery");
tcpm_set_state(port, ERROR_RECOVERY, 0);
} else {
- if (msg->header & PD_HEADER_EXT_HDR)
+ if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
tcpm_pd_ext_msg_request(port, msg);
else if (cnt)
tcpm_pd_data_request(port, msg);
@@ -3682,14 +3752,6 @@ static inline enum tcpm_state unattached_state(struct tcpm_port *port)
return SNK_UNATTACHED;
}
-static void tcpm_check_send_discover(struct tcpm_port *port)
-{
- if ((port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20) &&
- port->send_discover && port->pd_capable)
- tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
- port->send_discover = false;
-}
-
static void tcpm_swap_complete(struct tcpm_port *port, int result)
{
if (port->swap_pending) {
@@ -3926,7 +3988,18 @@ static void run_state_machine(struct tcpm_port *port)
break;
}
- tcpm_check_send_discover(port);
+ /*
+ * 6.4.4.3.1 Discover Identity
+ * "The Discover Identity Command Shall only be sent to SOP when there is an
+ * Explicit Contract."
+ * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ * port->explicit_contract to decide whether to send the command.
+ */
+ if (port->explicit_contract)
+ mod_send_discover_delayed_work(port, 0);
+ else
+ port->send_discover = false;
+
/*
* 6.3.5
* Sending ping messages is not necessary if
@@ -4055,7 +4128,7 @@ static void run_state_machine(struct tcpm_port *port)
if (port->vbus_present) {
u32 current_lim = tcpm_get_current_limit(port);
- if (port->slow_charger_loop || (current_lim > PD_P_SNK_STDBY_MW / 5))
+ if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
current_lim = PD_P_SNK_STDBY_MW / 5;
tcpm_set_current_limit(port, current_lim, 5000);
tcpm_set_charge(port, true);
@@ -4194,7 +4267,18 @@ static void run_state_machine(struct tcpm_port *port)
break;
}
- tcpm_check_send_discover(port);
+ /*
+ * 6.4.4.3.1 Discover Identity
+ * "The Discover Identity Command Shall only be sent to SOP when there is an
+ * Explicit Contract."
+ * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ * port->explicit_contract.
+ */
+ if (port->explicit_contract)
+ mod_send_discover_delayed_work(port, 0);
+ else
+ port->send_discover = false;
+
power_supply_changed(port->psy);
break;
@@ -5288,6 +5372,29 @@ unlock:
mutex_unlock(&port->lock);
}
+static void tcpm_send_discover_work(struct kthread_work *work)
+{
+ struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
+
+ mutex_lock(&port->lock);
+ /* No need to send DISCOVER_IDENTITY anymore */
+ if (!port->send_discover)
+ goto unlock;
+
+ /* Retry if the port is not idle */
+ if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+ mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+ goto unlock;
+ }
+
+ /* Only send the Message if the port is host for PD rev2.0 */
+ if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
+ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+
+unlock:
+ mutex_unlock(&port->lock);
+}
+
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
struct tcpm_port *port = typec_get_drvdata(p);
@@ -5754,6 +5861,15 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
if (!fwnode)
return -EINVAL;
+ /*
+ * This fwnode has a "compatible" property, but is never populated as a
+ * struct device. Instead we simply parse it to read the properties.
+ * This breaks fw_devlink=on. To maintain backward compatibility
+ * with existing DT files, we work around this by deleting any
+ * fwnode_links to/from this fwnode.
+ */
+ fw_devlink_purge_absent_suppliers(fwnode);
+
/* USB data support is optional */
ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
if (ret == 0) {
@@ -5841,6 +5957,22 @@ sink:
return ret;
}
+ /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+ if (port->nr_snk_vdo) {
+ ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ENODATA;
+
+ port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+ port->snk_vdo_v1,
+ port->nr_snk_vdo_v1);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -6093,6 +6225,14 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
+{
+ struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
+
+ kthread_queue_work(port->wq, &port->send_discover_work);
+ return HRTIMER_NORESTART;
+}
+
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
@@ -6123,12 +6263,15 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
+ kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->state_machine_timer.function = state_machine_timer_handler;
hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->enable_frs_timer.function = enable_frs_timer_handler;
+ hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->send_discover_timer.function = send_discover_timer_handler;
spin_lock_init(&port->pd_event_lock);
@@ -6195,6 +6338,11 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ hrtimer_cancel(&port->send_discover_timer);
+ hrtimer_cancel(&port->enable_frs_timer);
+ hrtimer_cancel(&port->vdm_state_machine_timer);
+ hrtimer_cancel(&port->state_machine_timer);
+
tcpm_reset_port(port);
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
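The send_discover additions above follow the same pattern as the driver's other timers: an hrtimer whose handler only queues a kthread_work, with the work function doing the real processing under port->lock. A minimal sketch of that pattern, assuming hypothetical names (my_worker, my_work, my_timer and the callbacks are placeholders, not symbols from this driver):

#include <linux/hrtimer.h>
#include <linux/kthread.h>
#include <linux/ktime.h>

static struct kthread_worker *my_worker;
static struct kthread_work my_work;
static struct hrtimer my_timer;

static void my_work_fn(struct kthread_work *work)
{
	/* Runs in process context; safe to take mutexes and sleep here. */
}

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	/* hrtimer callbacks must not sleep: only queue the work item. */
	kthread_queue_work(my_worker, &my_work);
	return HRTIMER_NORESTART;
}

static int my_init(void)
{
	my_worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);
	kthread_init_work(&my_work, my_work_fn);
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	/* Arm (or re-arm) the timer, e.g. to retry in 50 ms. */
	hrtimer_start(&my_timer, ms_to_ktime(50), HRTIMER_MODE_REL);
	return 0;
}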
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 79ae63950050..5d125339687a 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
const u8 *data = (void *)msg;
int i;
- for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+ for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
data[i]);
if (ret)
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 282c3c825c13..b7d104c80d85 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
}
}
-static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
+ u32 *pdos, int offset, int num_pdos)
{
struct ucsi *ucsi = con->ucsi;
u64 command;
@@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
- command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
+ command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
+ command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
command |= UCSI_GET_PDOS_SRC_PDOS;
- ret = ucsi_send_command(ucsi, command, con->src_pdos,
- sizeof(con->src_pdos));
- if (ret < 0) {
+ ret = ucsi_send_command(ucsi, command, pdos + offset,
+ num_pdos * sizeof(u32));
+ if (ret < 0)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
+ if (ret == 0 && offset == 0)
+ dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+
+ return ret;
+}
+
+static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
+{
+ int ret;
+
+ /* The UCSI message payload only allows fetching at most 4 PDOs at a time */
+ ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
+ if (ret < 0)
return;
- }
+
con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
- if (ret == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+ if (con->num_pdos < UCSI_MAX_PDOS)
+ return;
+
+ /* get the remaining PDOs, if any */
+ ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
+ PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
+ if (ret < 0)
+ return;
+
+ con->num_pdos += ret / sizeof(u32);
}
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
@@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
case UCSI_CONSTAT_PWR_OPMODE_PD:
con->rdo = con->status.request_data_obj;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
- ucsi_get_pdos(con, 1);
+ ucsi_get_src_pdos(con, 1);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
@@ -694,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
ucsi_send_command(con->ucsi, command, NULL, 0);
/* 3. ACK connector change */
- clear_bit(EVENT_PENDING, &ucsi->flags);
ret = ucsi_acknowledge_connector_change(ucsi);
+ clear_bit(EVENT_PENDING, &ucsi->flags);
if (ret) {
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
goto out_unlock;
@@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
.pr_set = ucsi_pr_swap
};
+/* Caller must call fwnode_handle_put() after use */
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
struct fwnode_handle *fwnode;
@@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
command |= UCSI_CONNECTOR_NUMBER(con->num);
ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
- goto out;
+ goto out_unlock;
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
cap->data = TYPEC_PORT_DRD;
@@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
trace_ucsi_register_port(con->num, &con->status);
out:
+ fwnode_handle_put(cap->fwnode);
+out_unlock:
mutex_unlock(&con->lock);
return ret;
}
@@ -1227,6 +1253,7 @@ err_unregister:
}
err_reset:
+ memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
err:
return ret;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 3920e20a9e9e..cee666790907 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -8,6 +8,7 @@
#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
+#include <linux/usb/pd.h>
#include <linux/usb/role.h>
/* -------------------------------------------------------------------------- */
@@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
/* GET_PDOS command bits */
#define UCSI_GET_PDOS_PARTNER_PDO(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PDOS_PDO_OFFSET(_r_) ((u64)(_r_) << 24)
#define UCSI_GET_PDOS_NUM_PDOS(_r_) ((u64)(_r_) << 32)
+#define UCSI_MAX_PDOS (4)
#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
/* -------------------------------------------------------------------------- */
@@ -302,7 +305,6 @@ struct ucsi {
#define UCSI_MAX_SVID 5
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
-#define UCSI_MAX_PDOS (4)
#define UCSI_TYPEC_VSAFE5V 5000
#define UCSI_TYPEC_1_5_CURRENT 1500
@@ -330,7 +332,7 @@ struct ucsi_connector {
struct power_supply *psy;
struct power_supply_desc psy_desc;
u32 rdo;
- u32 src_pdos[UCSI_MAX_PDOS];
+ u32 src_pdos[PDO_MAX_OBJECTS];
int num_pdos;
struct usb_role_switch *usb_role_sw;
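The UCSI_GET_PDOS_PDO_OFFSET field added above is what makes chunked retrieval possible: the message payload carries at most UCSI_MAX_PDOS (4) PDOs per command, while src_pdos can now hold PDO_MAX_OBJECTS (7, from <linux/usb/pd.h>). A sketch of how the second chunk's command word would be composed from these macros (illustrative values matching the new ucsi_get_src_pdos() logic, not a new API):

	u64 command;

	/* Second chunk: partner source PDOs 4..6 (PDO_MAX_OBJECTS - UCSI_MAX_PDOS = 3). */
	command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
	command |= UCSI_GET_PDOS_PARTNER_PDO(1);		/* partner's PDOs, not the port's own */
	command |= UCSI_GET_PDOS_PDO_OFFSET(UCSI_MAX_PDOS);	/* start at the fifth PDO */
	command |= UCSI_GET_PDOS_NUM_PDOS(3 - 1);		/* field encodes "requested - 1" */
	command |= UCSI_GET_PDOS_SRC_PDOS;			/* source capability PDOs */
	/* ucsi_send_command() then returns the number of PDO bytes actually received. */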
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 189e4385df40..dda5dc6f7737 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -15,6 +15,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/mpfs.h>
#include "mlx5_vdpa.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1859,11 +1860,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_net *ndev;
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
+ }
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
}
@@ -1990,6 +1996,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
struct mlx5_core_dev *mdev;
@@ -2023,10 +2030,17 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mtu;
+ if (!is_zero_ether_addr(config->mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (err)
+ goto err_mtu;
+ }
+
mvdev->vdev.dma_dev = mdev->device;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
- goto err_mtu;
+ goto err_mpfs;
err = alloc_resources(ndev);
if (err)
@@ -2044,6 +2058,9 @@ err_reg:
free_resources(ndev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mpfs:
+ if (!is_zero_ether_addr(config->mac))
+ mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
put_device(&mvdev->vdev.dev);
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 53ce78d7d07b..5e2e1b9a9fd3 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -2,6 +2,7 @@
config VFIO_PCI
tristate "VFIO support for PCI devices"
depends on VFIO && PCI && EVENTFD
+ depends on MMU
select VFIO_VIRQFD
select IRQ_BYPASS_MANAGER
help
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index d57f037f65b8..70e28efbc51f 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
if (len == 0xFF) {
len = vfio_ext_cap_len(vdev, ecap, epos);
if (len < 0)
- return ret;
+ return len;
}
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 361e5b57e369..470fcf7dac56 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -291,7 +291,7 @@ err_irq:
vfio_platform_regions_cleanup(vdev);
err_reg:
mutex_unlock(&driver_lock);
- module_put(THIS_MODULE);
+ module_put(vdev->parent_module);
return ret;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a0747c35a778..a3e925a41b0d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
return 0;
}
- size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
+ size = struct_size(cap_iovas, iova_ranges, iovas);
cap_iovas = kzalloc(size, GFP_KERNEL);
if (!cap_iovas)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 39258f9d36a0..ef9c57ce0906 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -380,7 +380,7 @@ static void vgacon_init(struct vc_data *c, int init)
vc_resize(c, vga_video_num_columns, vga_video_num_lines);
c->vc_scan_lines = vga_scan_lines;
- c->vc_font.height = vga_video_font_height;
+ c->vc_font.height = c->vc_cell_height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
@@ -515,32 +515,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
switch (CUR_SIZE(c->vc_cursor_type)) {
case CUR_UNDERLINE:
vgacon_set_cursor_size(c->state.x,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 2 : 3),
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_TWO_THIRDS:
vgacon_set_cursor_size(c->state.x,
- c->vc_font.height / 3,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height / 3,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_LOWER_THIRD:
vgacon_set_cursor_size(c->state.x,
- (c->vc_font.height * 2) / 3,
- c->vc_font.height -
- (c->vc_font.height <
+ (c->vc_cell_height * 2) / 3,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_LOWER_HALF:
vgacon_set_cursor_size(c->state.x,
- c->vc_font.height / 2,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height / 2,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_NONE:
@@ -551,7 +551,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
break;
default:
vgacon_set_cursor_size(c->state.x, 1,
- c->vc_font.height);
+ c->vc_cell_height);
break;
}
break;
@@ -562,13 +562,13 @@ static int vgacon_doresize(struct vc_data *c,
unsigned int width, unsigned int height)
{
unsigned long flags;
- unsigned int scanlines = height * c->vc_font.height;
+ unsigned int scanlines = height * c->vc_cell_height;
u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
raw_spin_lock_irqsave(&vga_lock, flags);
vgacon_xres = width * VGA_FONTWIDTH;
- vgacon_yres = height * c->vc_font.height;
+ vgacon_yres = height * c->vc_cell_height;
if (vga_video_type >= VIDEO_TYPE_VGAC) {
outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
max_scan = inb_p(vga_video_port_val);
@@ -623,9 +623,9 @@ static int vgacon_doresize(struct vc_data *c,
static int vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
- int y = c->vc_rows * c->vc_font.height;
+ int y = c->vc_rows * c->vc_cell_height;
int rows = screen_info.orig_video_lines * vga_default_font_height/
- c->vc_font.height;
+ c->vc_cell_height;
/*
* We need to save screen size here as it's the only way
* we can spot the screen has been resized and we need to
@@ -1038,7 +1038,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
cursor_size_lastto = 0;
c->vc_sw->con_cursor(c, CM_DRAW);
}
- c->vc_font.height = fontheight;
+ c->vc_font.height = c->vc_cell_height = fontheight;
vc_resize(c, 0, rows); /* Adjust console size */
}
}
@@ -1086,12 +1086,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
if ((width << 1) * height > vga_vram_size)
return -EINVAL;
+ if (user) {
+ /*
+ * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
+ * the video mode! Set the new defaults then and go away.
+ */
+ screen_info.orig_video_cols = width;
+ screen_info.orig_video_lines = height;
+ vga_default_font_height = c->vc_cell_height;
+ return 0;
+ }
if (width % 2 || width > screen_info.orig_video_cols ||
height > (screen_info.orig_video_lines * vga_default_font_height)/
- c->vc_font.height)
- /* let svgatextmode tinker with video timings and
- return success */
- return (user) ? 0 : -EINVAL;
+ c->vc_cell_height)
+ return -EINVAL;
if (con_is_visible(c) && !vga_is_gfx) /* who knows */
vgacon_doresize(c, width, height);
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index b292887a2481..a591d291b231 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
get_page(page);
+
+ if (vmf->vma->vm_file)
+ page->mapping = vmf->vma->vm_file->f_mapping;
+ else
+ printk(KERN_ERR "no mapping available\n");
+
+ BUG_ON(!page->mapping);
page->index = vmf->pgoff;
vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+ if (!PageDirty(page))
+ SetPageDirty(page);
+ return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+ .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file)
+{
+ file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct page *page;
+ int i;
BUG_ON(!fbdefio);
cancel_delayed_work_sync(&info->deferred_work);
+
+ /* clear out the mapping that we set up */
+ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+ page = fb_deferred_io_page(info, i);
+ page->mapping = NULL;
+ }
+
mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 3406067985b1..22bb3892f6bd 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
pr_debug("resize now %ix%i\n", var.xres, var.yres);
- if (con_is_visible(vc)) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 072780b0e570..98f193078c05 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
if (res)
module_put(info->fbops->owner);
}
+#ifdef CONFIG_FB_DEFERRED_IO
+ if (info->fbdefio)
+ fb_deferred_io_open(info, inode, file);
+#endif
out:
unlock_fb_info(info);
if (res)
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 8bbac7182ad3..bd3d07aa4f0e 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -286,7 +286,7 @@ static int hga_card_detect(void)
hga_vram = ioremap(0xb0000, hga_vram_len);
if (!hga_vram)
- goto error;
+ return -ENOMEM;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
@@ -346,13 +346,18 @@ static int hga_card_detect(void)
hga_type_name = "Hercules";
break;
}
- return 1;
+ return 0;
error:
if (release_io_ports)
release_region(0x3b0, 12);
if (release_io_port)
release_region(0x3bf, 1);
- return 0;
+
+ iounmap(hga_vram);
+
+ pr_err("hgafb: HGA card not detected.\n");
+
+ return -EINVAL;
}
/**
@@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
static int hgafb_probe(struct platform_device *pdev)
{
struct fb_info *info;
+ int ret;
- if (! hga_card_detect()) {
- printk(KERN_INFO "hgafb: HGA card not detected.\n");
- if (hga_vram)
- iounmap(hga_vram);
- return -EINVAL;
- }
+ ret = hga_card_detect();
+ if (ret)
+ return ret;
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
hga_type_name, hga_vram_len/1024);
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 3ac053b88495..16f272a50811 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1469,6 +1469,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct imstt_par *par;
struct fb_info *info;
struct device_node *dp;
+ int ret = -ENOMEM;
dp = pci_device_to_OF_node(pdev);
if(dp)
@@ -1504,28 +1505,37 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
"contact maintainer.\n", pdev->device);
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
- if (!info->screen_base) {
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENOMEM;
- }
+ if (!info->screen_base)
+ goto error;
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+ goto error;
par->cmap_regs_phys = addr + 0x840000;
par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+ goto error;
info->pseudo_palette = par->palette;
init_imstt(info);
pci_set_drvdata(pdev, info);
return 0;
+
+error:
+ if (par->dc_regs)
+ iounmap(par->dc_regs);
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(addr, size);
+ framebuffer_release(info);
+ return ret;
}
static void imsttfb_remove(struct pci_dev *pdev)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 7bbfd58958bc..d7e361fb0548 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -642,6 +642,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
}
info->eoi_time = 0;
+
+ /* is_active hasn't been reset yet, do it now. */
+ smp_store_release(&info->is_active, 0);
do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}
@@ -811,6 +814,7 @@ static void xen_evtchn_close(evtchn_port_t port)
BUG();
}
+/* Not called for lateeoi events. */
static void event_handler_exit(struct irq_info *info)
{
smp_store_release(&info->is_active, 0);
@@ -1883,7 +1887,12 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
if (VALID_EVTCHN(evtchn)) {
do_mask(info, EVT_MASK_REASON_EOI_PENDING);
- event_handler_exit(info);
+ /*
+ * Don't call event_handler_exit().
+ * Need to keep is_active non-zero in order to ignore re-raised
+ * events after cpu affinity changes while a lateeoi is pending.
+ */
+ clear_evtchn(evtchn);
}
}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f01d58c7a042..a3e7be96527d 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
- if (err)
+ if (err) {
+ map->vma = NULL;
goto out_unlock_put;
+ }
}
mutex_unlock(&priv->lock);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4c89afc0df62..24d11861ac7d 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -164,6 +164,11 @@ int __ref xen_swiotlb_init(void)
int rc = -ENOMEM;
char *start;
+ if (io_tlb_default_mem != NULL) {
+ pr_warn("swiotlb buffer already initialized\n");
+ return -EEXIST;
+ }
+
retry:
m_ret = XEN_SWIOTLB_ENOMEM;
order = get_order(bytes);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index e64e6befc63b..87e6b7db892f 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
}
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
- if (!pgmap)
+ if (!pgmap) {
+ ret = -ENOMEM;
goto err_pgmap;
+ }
pgmap->type = MEMORY_DEVICE_GENERIC;
pgmap->range = (struct range) {
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 4162d0e7e00d..cc7450f2b2a9 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev, int devid,
publish_pci_dev_cb publish_cb)
{
- int err = 0, slot, func = -1;
+ int err = 0, slot, func = PCI_FUNC(dev->devfn);
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
/*
* Keep multi-function devices together on the virtual PCI bus, except
- * virtual functions.
+ * that we want to keep virtual functions at func 0 on their own. They
+ * aren't multi-function devices and hence their presence at func 0
+ * may cause guests to not scan the other functions.
*/
- if (!dev->is_virtfn) {
+ if (!dev->is_virtfn || func) {
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot]))
continue;
t = list_entry(list_first(&vpci_dev->dev_list[slot]),
struct pci_dev_entry, list);
+ if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
+ continue;
if (match_slot(dev, t->dev)) {
dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
- slot, PCI_FUNC(dev->devfn));
+ slot, func);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = PCI_FUNC(dev->devfn);
goto unlock;
}
}
@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
goto unlock;
}
}
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5188f02e75fb..c09c7ebd6968 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -359,7 +359,8 @@ out:
return err;
}
-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
+ enum xenbus_state state)
{
int err = 0;
int num_devs;
@@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
mutex_lock(&pdev->dev_lock);
- /* Make sure we only reconfigure once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateReconfiguring)
+ if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
goto out;
err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
}
+ if (state != XenbusStateReconfiguring)
+ /* Make sure we only reconfigure once. */
+ goto out;
+
err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
break;
case XenbusStateReconfiguring:
- xen_pcibk_reconfigure(pdev);
+ xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
break;
case XenbusStateConnected:
@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
xen_pcibk_setup_backend(pdev);
break;
+ case XenbusStateInitialised:
+ /*
+ * We typically move to Initialised when the first device is
+ * added. Hence subsequent devices getting added may need
+ * reconfiguring.
+ */
+ xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
+ break;
+
default:
break;
}
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index a4e9e6e07e93..d3c6bb22c5f4 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -322,6 +322,8 @@ static int afs_deliver_cb_callback(struct afs_call *call)
return ret;
call->unmarshall++;
+ fallthrough;
+
case 5:
break;
}
@@ -418,6 +420,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
r->node[loop] = ntohl(b[loop + 5]);
call->unmarshall++;
+ fallthrough;
case 2:
break;
@@ -530,6 +533,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
r->node[loop] = ntohl(b[loop + 5]);
call->unmarshall++;
+ fallthrough;
case 2:
break;
@@ -663,6 +667,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ fallthrough;
case 3:
break;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 9fbe5a5ec9bd..78719f2f567e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1919,7 +1919,9 @@ static void afs_rename_edit_dir(struct afs_operation *op)
new_inode = d_inode(new_dentry);
if (new_inode) {
spin_lock(&new_inode->i_lock);
- if (new_inode->i_nlink > 0)
+ if (S_ISDIR(new_inode->i_mode))
+ clear_nlink(new_inode);
+ else if (new_inode->i_nlink > 0)
drop_nlink(new_inode);
spin_unlock(&new_inode->i_lock);
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 2f695a260442..dd3f45d906d2 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -388,6 +388,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
req->file_size = vp->scb.status.size;
call->unmarshall++;
+ fallthrough;
case 5:
break;
@@ -1408,6 +1409,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("motd '%s'", p);
call->unmarshall++;
+ fallthrough;
case 8:
break;
@@ -1845,6 +1847,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
+ fallthrough;
case 6:
break;
@@ -1979,6 +1982,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
+ fallthrough;
case 4:
break;
diff --git a/fs/afs/main.c b/fs/afs/main.c
index b2975256dadb..179004b15566 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -203,8 +203,8 @@ static int __init afs_init(void)
goto error_fs;
afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
- if (IS_ERR(afs_proc_symlink)) {
- ret = PTR_ERR(afs_proc_symlink);
+ if (!afs_proc_symlink) {
+ ret = -ENOMEM;
goto error_proc;
}
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index dc9327332f06..00fca3c66ba6 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -593,6 +593,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (ret < 0)
return ret;
call->unmarshall = 6;
+ fallthrough;
case 6:
break;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3edb6204b937..3104b62c2082 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -118,6 +118,15 @@ int afs_write_end(struct file *file, struct address_space *mapping,
_enter("{%llx:%llu},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
+ if (!PageUptodate(page)) {
+ if (copied < len) {
+ copied = 0;
+ goto out;
+ }
+
+ SetPageUptodate(page);
+ }
+
if (copied == 0)
goto out;
@@ -132,8 +141,6 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_sequnlock(&vnode->cb_lock);
}
- ASSERT(PageUptodate(page));
-
if (PagePrivate(page)) {
priv = page_private(page);
f = afs_page_dirty_from(page, priv);
@@ -730,7 +737,7 @@ static int afs_writepages_region(struct address_space *mapping,
return ret;
}
- start += ret * PAGE_SIZE;
+ start += ret;
cond_resched();
} while (wbc->nr_to_write > 0);
@@ -837,6 +844,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
unsigned long priv;
+ vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -848,14 +856,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page) &&
wait_on_page_fscache_killable(page) < 0)
- return VM_FAULT_RETRY;
+ goto out;
#endif
if (wait_on_page_writeback_killable(page))
- return VM_FAULT_RETRY;
+ goto out;
if (lock_page_killable(page) < 0)
- return VM_FAULT_RETRY;
+ goto out;
/* We mustn't change page->private until writeback is complete as that
* details the portion of the page we need to write back and we might
@@ -863,7 +871,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
*/
if (wait_on_page_writeback_killable(page) < 0) {
unlock_page(page);
- return VM_FAULT_RETRY;
+ goto out;
}
priv = afs_page_dirty(page, 0, thp_size(page));
@@ -877,8 +885,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
}
file_update_time(file);
+ ret = VM_FAULT_LOCKED;
+out:
sb_end_pagefault(inode->i_sb);
- return VM_FAULT_LOCKED;
+ return ret;
}
/*
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b8abccd03e5d..6cc4d4cfe0c2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1244,6 +1244,9 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
lockdep_assert_held(&bdev->bd_mutex);
+ if (!(disk->flags & GENHD_FL_UP))
+ return -ENXIO;
+
rescan:
if (bdev->bd_part_count)
return -EBUSY;
@@ -1298,6 +1301,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode)
struct gendisk *disk = bdev->bd_disk;
int ret = 0;
+ if (!(disk->flags & GENHD_FL_UP))
+ return -ENXIO;
+
if (!bdev->bd_openers) {
if (!bdev_is_partition(bdev)) {
ret = 0;
@@ -1332,8 +1338,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode)
whole->bd_part_count++;
mutex_unlock(&whole->bd_mutex);
- if (!(disk->flags & GENHD_FL_UP) ||
- !bdev_nr_sectors(bdev)) {
+ if (!bdev_nr_sectors(bdev)) {
__blkdev_put(whole, mode, 1);
bdput(whole);
return -ENXIO;
@@ -1364,16 +1369,12 @@ struct block_device *blkdev_get_no_open(dev_t dev)
struct block_device *bdev;
struct gendisk *disk;
- down_read(&bdev_lookup_sem);
bdev = bdget(dev);
if (!bdev) {
- up_read(&bdev_lookup_sem);
blk_request_module(dev);
- down_read(&bdev_lookup_sem);
-
bdev = bdget(dev);
if (!bdev)
- goto unlock;
+ return NULL;
}
disk = bdev->bd_disk;
@@ -1383,14 +1384,11 @@ struct block_device *blkdev_get_no_open(dev_t dev)
goto put_disk;
if (!try_module_get(bdev->bd_disk->fops->owner))
goto put_disk;
- up_read(&bdev_lookup_sem);
return bdev;
put_disk:
put_disk(disk);
bdput:
bdput(bdev);
-unlock:
- up_read(&bdev_lookup_sem);
return NULL;
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index aa57bdc8fc89..6d5c4e45cfef 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2442,16 +2442,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
if (!--cache->ro) {
- num_bytes = cache->length - cache->reserved -
- cache->pinned - cache->bytes_super -
- cache->zone_unusable - cache->used;
- sinfo->bytes_readonly -= num_bytes;
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable = cache->alloc_offset - cache->used;
sinfo->bytes_zone_unusable += cache->zone_unusable;
sinfo->bytes_readonly -= cache->zone_unusable;
}
+ num_bytes = cache->length - cache->reserved -
+ cache->pinned - cache->bytes_super -
+ cache->zone_unusable - cache->used;
+ sinfo->bytes_readonly -= num_bytes;
list_del_init(&cache->ro_list);
}
spin_unlock(&cache->lock);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 2bea01d23a5b..1346d698463a 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -28,6 +28,7 @@
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
+#include "zoned.h"
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
@@ -349,6 +350,7 @@ static void end_compressed_bio_write(struct bio *bio)
*/
inode = cb->inode;
cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
+ btrfs_record_physical_zoned(inode, cb->start, bio);
btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
cb->start, cb->start + cb->len - 1,
bio->bi_status == BLK_STS_OK);
@@ -401,6 +403,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
u64 first_byte = disk_start;
blk_status_t ret;
int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
+ const bool use_append = btrfs_use_zone_append(inode, disk_start);
+ const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
WARN_ON(!PAGE_ALIGNED(start));
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@@ -418,10 +422,31 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
cb->nr_pages = nr_pages;
bio = btrfs_bio_alloc(first_byte);
- bio->bi_opf = REQ_OP_WRITE | write_flags;
+ bio->bi_opf = bio_op | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
+ if (use_append) {
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct block_device *bdev;
+
+ em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
+ if (IS_ERR(em)) {
+ kfree(cb);
+ bio_put(bio);
+ return BLK_STS_NOTSUPP;
+ }
+
+ map = em->map_lookup;
+ /* We only support single profile for now */
+ ASSERT(map->num_stripes == 1);
+ bdev = map->stripes[0].dev->bdev;
+
+ bio_set_dev(bio, bdev);
+ free_extent_map(em);
+ }
+
if (blkcg_css) {
bio->bi_opf |= REQ_CGROUP_PUNT;
kthread_associate_blkcg(blkcg_css);
@@ -432,6 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
bytes_left = compressed_len;
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
int submit = 0;
+ int len = 0;
page = compressed_pages[pg_index];
page->mapping = inode->vfs_inode.i_mapping;
@@ -439,9 +465,20 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
0);
+ /*
+ * Page can only be added to bio if the current bio fits in
+ * stripe.
+ */
+ if (!submit) {
+ if (pg_index == 0 && use_append)
+ len = bio_add_zone_append_page(bio, page,
+ PAGE_SIZE, 0);
+ else
+ len = bio_add_page(bio, page, PAGE_SIZE, 0);
+ }
+
page->mapping = NULL;
- if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
- PAGE_SIZE) {
+ if (submit || len < PAGE_SIZE) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
@@ -465,11 +502,15 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
}
bio = btrfs_bio_alloc(first_byte);
- bio->bi_opf = REQ_OP_WRITE | write_flags;
+ bio->bi_opf = bio_op | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
if (blkcg_css)
bio->bi_opf |= REQ_CGROUP_PUNT;
+ /*
+ * Use bio_add_page() to ensure the bio has at least one
+ * page.
+ */
bio_add_page(bio, page, PAGE_SIZE, 0);
}
if (bytes_left < PAGE_SIZE) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c9a3036c23bf..8d386a5587ee 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2648,6 +2648,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
+ if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+ BTRFS_FSID_SIZE)) {
+ btrfs_err(fs_info,
+ "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+ fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ ret = -EINVAL;
+ }
+
+ if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+ memcmp(fs_info->fs_devices->metadata_uuid,
+ fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+ btrfs_err(fs_info,
+"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+ fs_info->super_copy->metadata_uuid,
+ fs_info->fs_devices->metadata_uuid);
+ ret = -EINVAL;
+ }
+
if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
@@ -3279,14 +3297,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
disk_super = fs_info->super_copy;
- ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE));
-
- if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
- ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
- fs_info->super_copy->metadata_uuid,
- BTRFS_FSID_SIZE));
- }
features = btrfs_super_flags(disk_super);
if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f1d15b68994a..3d5c35e4cb76 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
btrfs_put_delayed_ref_head(head);
- return 0;
+ return ret;
}
static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 074a78a202b8..dee2dafbc872 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3753,7 +3753,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
/* Note that em_end from extent_map_end() is exclusive */
iosize = min(em_end, end + 1) - cur;
- if (btrfs_use_zone_append(inode, em))
+ if (btrfs_use_zone_append(inode, em->block_start))
opf = REQ_OP_ZONE_APPEND;
free_extent_map(em);
@@ -5196,7 +5196,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
int ret = 0;
- u64 off = start;
+ u64 off;
u64 max = start + len;
u32 flags = 0;
u32 found_type;
@@ -5231,6 +5231,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
goto out_free_ulist;
}
+ /*
+ * We can't initialize 'off' to 'start' as this could miss extents due
+ * to extent item merging.
+ */
+ off = 0;
start = round_down(start, btrfs_inode_sectorsize(inode));
len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 294602f139ef..441cee7fbb62 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
u64 end_byte = bytenr + len;
u64 csum_end;
struct extent_buffer *leaf;
- int ret;
+ int ret = 0;
const u32 csum_size = fs_info->csum_size;
u32 blocksize_bits = fs_info->sectorsize_bits;
@@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
+ ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
@@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, root, path,
path->slots[0], del_nr);
if (ret)
- goto out;
+ break;
if (key.offset == bytenr)
break;
} else if (key.offset < bytenr && csum_end > end_byte) {
@@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
ret = btrfs_split_item(trans, root, path, &key, offset);
if (ret && ret != -EAGAIN) {
btrfs_abort_transaction(trans, ret);
- goto out;
+ break;
}
+ ret = 0;
key.offset = end_byte - 1;
} else {
@@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
}
- ret = 0;
-out:
btrfs_free_path(path);
return ret;
}
+static int find_next_csum_offset(struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 *next_offset)
+{
+ const u32 nritems = btrfs_header_nritems(path->nodes[0]);
+ struct btrfs_key found_key;
+ int slot = path->slots[0] + 1;
+ int ret;
+
+ if (nritems == 0 || slot >= nritems) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ *next_offset = (u64)-1;
+ return 0;
+ }
+ slot = path->slots[0];
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+
+ if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+ found_key.type != BTRFS_EXTENT_CSUM_KEY)
+ *next_offset = (u64)-1;
+ else
+ *next_offset = found_key.offset;
+
+ return 0;
+}
+
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
@@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
u64 total_bytes = 0;
u64 csum_offset;
u64 bytenr;
- u32 nritems;
u32 ins_size;
int index = 0;
int found_next;
@@ -981,26 +1011,10 @@ again:
goto insert;
}
} else {
- int slot = path->slots[0] + 1;
- /* we didn't find a csum item, insert one */
- nritems = btrfs_header_nritems(path->nodes[0]);
- if (!nritems || (path->slots[0] >= nritems - 1)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- goto out;
- } else if (ret > 0) {
- found_next = 1;
- goto insert;
- }
- slot = path->slots[0];
- }
- btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
- if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
- found_key.type != BTRFS_EXTENT_CSUM_KEY) {
- found_next = 1;
- goto insert;
- }
- next_offset = found_key.offset;
+ /* We didn't find a csum item, insert one. */
+ ret = find_next_csum_offset(root, path, &next_offset);
+ if (ret < 0)
+ goto out;
found_next = 1;
goto insert;
}
@@ -1056,8 +1070,48 @@ extend_csum:
tmp = sums->len - total_bytes;
tmp >>= fs_info->sectorsize_bits;
WARN_ON(tmp < 1);
+ extend_nr = max_t(int, 1, tmp);
+
+ /*
+ * A log tree can already have checksum items with a subset of
+ * the checksums we are trying to log. This can happen after
+ * doing a sequence of partial writes into prealloc extents and
+ * fsyncs in between, with a full fsync logging a larger subrange
+ * of an extent for which a previous fast fsync logged a smaller
+ * subrange. And this happens in particular due to merging file
+ * extent items when we complete an ordered extent for a range
+ * covered by a prealloc extent - this is done at
+ * btrfs_mark_extent_written().
+ *
+ * So if we try to extend the previous checksum item, which has
+ * a range that ends at the start of the range we want to insert,
+ * make sure we don't extend beyond the start offset of the next
+ * checksum item. If we are at the last item in the leaf, then
+ * forget the optimization of extending and add a new checksum
+ * item - it is not worth the complexity of releasing the path,
+ * getting the first key for the next leaf, repeat the btree
+ * search, etc, because log trees are temporary anyway and it
+ * would only save a few bytes of leaf space.
+ */
+ if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if (path->slots[0] + 1 >=
+ btrfs_header_nritems(path->nodes[0])) {
+ ret = find_next_csum_offset(root, path, &next_offset);
+ if (ret < 0)
+ goto out;
+ found_next = 1;
+ goto insert;
+ }
+
+ ret = find_next_csum_offset(root, path, &next_offset);
+ if (ret < 0)
+ goto out;
+
+ tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
+ if (tmp <= INT_MAX)
+ extend_nr = min_t(int, extend_nr, tmp);
+ }
- extend_nr = max_t(int, 1, (int)tmp);
diff = (csum_offset + extend_nr) * csum_size;
diff = min(diff,
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
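To make the log-tree clamp above concrete, a hypothetical example (numbers are illustrative, not taken from the patch): with a 4 KiB sectorsize, sectorsize_bits is 12, so if the checksum item being extended ends at bytenr and find_next_csum_offset() reports the next checksum item starting 64 KiB further on, then tmp = (next_offset - bytenr) >> 12 = 16 and extend_nr is clamped to at most 16 sectors' worth of checksums. Any remaining checksums from the ordered extent are then inserted on a later pass through the 'again' loop instead of overlapping the existing item.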
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3b10d98b4ebb..55f68422061d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1094,7 +1094,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
int del_nr = 0;
int del_slot = 0;
int recow;
- int ret;
+ int ret = 0;
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
@@ -1315,7 +1315,7 @@ again:
}
out:
btrfs_free_path(path);
- return 0;
+ return ret;
}
/*
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index eb6fddf40841..46f392943f4d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3000,6 +3000,18 @@ out:
if (ret || truncated) {
u64 unwritten_start = start;
+ /*
+ * If we failed to finish this ordered extent for any reason we
+ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+ * extent, and mark the inode with the error if it wasn't
+ * already set. Any error during writeback would have already
+ * set the mapping error, so we need to set it if we're the ones
+ * marking this ordered extent as failed.
+ */
+ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+ &ordered_extent->flags))
+ mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
if (truncated)
unwritten_start += logical_len;
clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
@@ -3241,6 +3253,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
inode = list_first_entry(&fs_info->delayed_iputs,
struct btrfs_inode, delayed_iput);
run_delayed_iput_locked(fs_info, inode);
+ cond_resched_lock(&fs_info->delayed_iput_lock);
}
spin_unlock(&fs_info->delayed_iput_lock);
}
@@ -7785,7 +7798,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
iomap->bdev = fs_info->fs_devices->latest_bdev;
iomap->length = len;
- if (write && btrfs_use_zone_append(BTRFS_I(inode), em))
+ if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
iomap->flags |= IOMAP_F_ZONE_APPEND;
free_extent_map(em);
@@ -9075,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
int ret2;
bool root_log_pinned = false;
bool dest_log_pinned = false;
+ bool need_abort = false;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
@@ -9134,6 +9148,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
old_idx);
if (ret)
goto out_fail;
+ need_abort = true;
}
/* And now for the dest. */
@@ -9149,8 +9164,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
new_ino,
btrfs_ino(BTRFS_I(old_dir)),
new_idx);
- if (ret)
+ if (ret) {
+ if (need_abort)
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
+ }
}
/* Update inode version and ctime/mtime. */
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 3928ecc40d7b..9178da07cc9c 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -203,10 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
* inline extent's data to the page.
*/
ASSERT(key.offset > 0);
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
- inline_data, size, datal,
- comp_type);
- goto out;
+ goto copy_to_page;
}
} else if (i_size_read(dst) <= datal) {
struct btrfs_file_extent_item *ei;
@@ -222,13 +219,10 @@ static int clone_copy_inline_extent(struct inode *dst,
BTRFS_FILE_EXTENT_INLINE)
goto copy_inline_extent;
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
- inline_data, size, datal, comp_type);
- goto out;
+ goto copy_to_page;
}
copy_inline_extent:
- ret = 0;
/*
* We have no extent items, or we have an extent at offset 0 which may
* or may not be inlined. All these cases are dealt the same way.
@@ -240,11 +234,13 @@ copy_inline_extent:
* clone. Deal with all these cases by copying the inline extent
* data into the respective page at the destination inode.
*/
- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
- inline_data, size, datal, comp_type);
- goto out;
+ goto copy_to_page;
}
+ /*
+ * Release path before starting a new transaction so we don't hold locks
+ * that would confuse lockdep.
+ */
btrfs_release_path(path);
/*
* If we end up here it means were copy the inline extent into a leaf
@@ -301,6 +297,21 @@ out:
*trans_out = trans;
return ret;
+
+copy_to_page:
+ /*
+ * Release our path because we don't need it anymore and also because
+ * copy_inline_to_page() needs to reserve data and metadata, which may
+ * need to flush delalloc when we are low on available space and
+ * therefore cause a deadlock if writeback of an inline extent needs to
+ * write to the same leaf or an ordered extent completion needs to write
+ * to the same leaf.
+ */
+ btrfs_release_path(path);
+
+ ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+ inline_data, size, datal, comp_type);
+ goto out;
}
/**
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 95a600034d61..dbcf8bb2f3b9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ if (ret)
+ goto out;
}
ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ if (ret)
+ goto out;
}
BTRFS_I(inode)->index_cnt = (u64)-1;
@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
break;
if (ret == 1) {
+ ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
ret = btrfs_del_item(trans, root, path);
if (ret)
- goto out;
+ break;
btrfs_release_path(path);
inode = read_one_inode(root, key.offset);
- if (!inode)
- return -EIO;
+ if (!inode) {
+ ret = -EIO;
+ break;
+ }
ret = fixup_inode_link_count(trans, root, inode);
iput(inode);
if (ret)
- goto out;
+ break;
/*
* fixup on a directory may create new entries,
@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
*/
key.offset = (u64)-1;
}
- ret = 0;
-out:
btrfs_release_path(path);
return ret;
}
@@ -1858,8 +1863,6 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
} else if (ret == -EEXIST) {
ret = 0;
- } else {
- BUG(); /* Logic Error */
}
iput(inode);
@@ -3299,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* begins and releases it only after writing its superblock.
*/
mutex_lock(&fs_info->tree_log_mutex);
+
+ /*
+ * The previous transaction writeout phase could have failed, and thus
+ * marked the fs in an error state. We must not commit here, as we
+ * could have updated our generation in the super_for_commit and
+ * writing the super here would result in transid mismatches. If there
+ * is an error here just bail.
+ */
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+ ret = -EIO;
+ btrfs_set_log_full_commit(trans);
+ btrfs_abort_transaction(trans, ret);
+ mutex_unlock(&fs_info->tree_log_mutex);
+ goto out_wake_log_root;
+ }
+
btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
ret = write_all_supers(fs_info, 1);
@@ -6463,6 +6482,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
(!old_dir || old_dir->logged_trans < trans->transid))
return;
+ /*
+ * If we are doing a rename (old_dir is not NULL) from a directory that
+ * was previously logged, make sure the next log attempt on the directory
+ * is not skipped and logs the inode again. This is because the log may
+ * not currently be authoritative for a range including the old
+ * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
+ * sure after a log replay we do not end up with both the new and old
+ * dentries around (in case the inode is a directory we would have a
+ * directory with two hard links and 2 inode references for different
+ * parents). The next log attempt of old_dir will happen at
+ * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
+ * below, because we have previously set inode->last_unlink_trans to the
+ * current transaction ID, either here or at btrfs_record_unlink_dir() in
+ * case inode is a directory.
+ */
+ if (old_dir)
+ old_dir->logged_trans = 0;
+
btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
ctx.logging_new_name = true;
/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9a1ead0c4a31..47d27059d064 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1459,7 +1459,7 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
/* Given hole range was invalid (outside of device) */
if (ret == -ERANGE) {
*hole_start += *hole_size;
- *hole_size = false;
+ *hole_size = 0;
return true;
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 304ce64c70a4..f1f3b10d1dbb 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
return (u32)zone;
}
+static inline sector_t zone_start_sector(u32 zone_number,
+ struct block_device *bdev)
+{
+ return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
+}
+
+static inline u64 zone_start_physical(u32 zone_number,
+ struct btrfs_zoned_device_info *zone_info)
+{
+ return (u64)zone_number << zone_info->zone_size_shift;
+}
+
/*
* Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
* device into static sized chunks and fake a conventional zone on each of
@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (sb_zone + 1 >= zone_info->nr_zones)
continue;
- sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
- ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+ ret = btrfs_get_dev_zones(device,
+ zone_start_physical(sb_zone, zone_info),
&zone_info->sb_zones[sb_pos],
&nr_zones);
if (ret)
@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
if (sb_zone + 1 >= nr_zones)
return -ENOENT;
- ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+ ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
zones);
if (ret < 0)
@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
return -ENOENT;
return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
- sb_zone << zone_sectors_shift,
+ zone_start_sector(sb_zone, bdev),
zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}
@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
if (!(end <= sb_zone ||
sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
have_sb = true;
- pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+ pos = zone_start_physical(
+ sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
break;
}
@@ -1278,7 +1291,7 @@ void btrfs_free_redirty_list(struct btrfs_transaction *trans)
spin_unlock(&trans->releasing_ebs_lock);
}
-bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_group *cache;
@@ -1293,7 +1306,7 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
if (!is_data_inode(&inode->vfs_inode))
return false;
- cache = btrfs_lookup_block_group(fs_info, em->block_start);
+ cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);
if (!cache)
return false;
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 5e41a74a9cb2..e55d32595c2c 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -53,7 +53,7 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
struct extent_buffer *eb);
void btrfs_free_redirty_list(struct btrfs_transaction *trans);
-bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
+bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start);
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
struct bio *bio);
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
@@ -152,8 +152,7 @@ static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
struct extent_buffer *eb) { }
static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
-static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
- struct extent_map *em)
+static inline bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
return false;
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 5624fae7a603..9ba79b6531fb 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -668,14 +668,13 @@ out:
* Handle lookups for the hidden .snap directory.
*/
struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
- struct dentry *dentry, int err)
+ struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
/* .snap dir? */
- if (err == -ENOENT &&
- ceph_snap(parent) == CEPH_NOSNAP &&
+ if (ceph_snap(parent) == CEPH_NOSNAP &&
strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) {
struct dentry *res;
struct inode *inode = ceph_get_snapdir(parent);
@@ -742,7 +741,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
- struct dentry *res;
int op;
int mask;
int err;
@@ -793,12 +791,16 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
req->r_parent = dir;
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
err = ceph_mdsc_do_request(mdsc, NULL, req);
- res = ceph_handle_snapdir(req, dentry, err);
- if (IS_ERR(res)) {
- err = PTR_ERR(res);
- } else {
- dentry = res;
- err = 0;
+ if (err == -ENOENT) {
+ struct dentry *res;
+
+ res = ceph_handle_snapdir(req, dentry);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ } else {
+ dentry = res;
+ err = 0;
+ }
}
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 77fc037d5beb..d51af3698032 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -578,6 +578,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_inode_info *ci = ceph_inode(dir);
struct inode *inode;
struct timespec64 now;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
@@ -615,8 +616,10 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
ceph_file_layout_to_legacy(lo, &in.layout);
+ down_read(&mdsc->snap_rwsem);
ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
req->r_fmode, NULL);
+ up_read(&mdsc->snap_rwsem);
if (ret) {
dout("%s failed to fill inode: %d\n", __func__, ret);
ceph_dir_clear_complete(dir);
@@ -739,14 +742,16 @@ retry:
err = ceph_mdsc_do_request(mdsc,
(flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
req);
- dentry = ceph_handle_snapdir(req, dentry, err);
- if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
- goto out_req;
+ if (err == -ENOENT) {
+ dentry = ceph_handle_snapdir(req, dentry);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out_req;
+ }
+ err = 0;
}
- err = 0;
- if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+ if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
if (d_in_lookup(dentry)) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e1c63adb196d..df0c8a724609 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -777,6 +777,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
umode_t mode = le32_to_cpu(info->mode);
dev_t rdev = le32_to_cpu(info->rdev);
+ lockdep_assert_held(&mdsc->snap_rwsem);
+
dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
inode, ceph_vinop(inode), le64_to_cpu(info->version),
ci->i_version);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index db80d89556b1..839e6b0239ee 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1218,7 +1218,7 @@ extern const struct dentry_operations ceph_dentry_ops;
extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
- struct dentry *dentry, int err);
+ struct dentry *dentry);
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err);
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 4a97fe12006b..37fc7d6ac457 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -72,15 +72,28 @@ struct smb3_key_debug_info {
} __packed;
/*
- * Dump full key (32 byte encrypt/decrypt keys instead of 16 bytes)
- * is needed if GCM256 (stronger encryption) negotiated
+ * Dump variable-sized keys
*/
struct smb3_full_key_debug_info {
- __u64 Suid;
+ /* INPUT: size of userspace buffer */
+ __u32 in_size;
+
+ /*
+ * INPUT: 0 for current user, otherwise session to dump
+ * OUTPUT: session id that was dumped
+ */
+ __u64 session_id;
__u16 cipher_type;
- __u8 auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
- __u8 smb3encryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
- __u8 smb3decryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
+ __u8 session_key_length;
+ __u8 server_in_key_length;
+ __u8 server_out_key_length;
+ __u8 data[];
+ /*
+ * return this struct with the keys appended at the end:
+ * __u8 session_key[session_key_length];
+ * __u8 server_in_key[server_in_key_length];
+ * __u8 server_out_key[server_out_key_length];
+ */
} __packed;
struct smb3_notify {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d7ea9c5fe0f8..2ffcb29d5c8f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -133,7 +133,7 @@ struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
-struct workqueue_struct *deferredclose_wq;
+struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;
/*
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d88b4b523dcc..8488d7024462 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1257,8 +1257,7 @@ struct cifsFileInfo {
struct work_struct oplock_break; /* work for oplock breaks */
struct work_struct put; /* work for the final part of _put */
struct delayed_work deferred;
- bool oplock_break_received; /* Flag to indicate oplock break */
- bool deferred_scheduled;
+ bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
};
struct cifs_io_parms {
@@ -1418,6 +1417,7 @@ struct cifsInodeInfo {
struct inode vfs_inode;
struct list_head deferred_closes; /* list of deferred closes */
spinlock_t deferred_lock; /* protection on deferred list */
+ bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
};
static inline struct cifsInodeInfo *
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b53a87db282f..554d64fe171e 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -148,7 +148,8 @@
#define SMB3_SIGN_KEY_SIZE (16)
/*
- * Size of the smb3 encryption/decryption keys
+ * Size of the smb3 encryption/decryption key storage.
+ * This size is big enough to store keys for any cipher type.
*/
#define SMB3_ENC_DEC_KEY_SIZE (32)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6caad100c3f3..379a427f3c2f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -323,8 +323,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->dentry = dget(dentry);
cfile->f_flags = file->f_flags;
cfile->invalidHandle = false;
- cfile->oplock_break_received = false;
- cfile->deferred_scheduled = false;
+ cfile->deferred_close_scheduled = false;
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
INIT_WORK(&cfile->put, cifsFileInfo_put_work);
@@ -574,21 +573,18 @@ int cifs_open(struct inode *inode, struct file *file)
file->f_op = &cifs_file_direct_ops;
}
- spin_lock(&CIFS_I(inode)->deferred_lock);
/* Get the cached handle as SMB2 close is deferred */
rc = cifs_get_readable_path(tcon, full_path, &cfile);
if (rc == 0) {
if (file->f_flags == cfile->f_flags) {
file->private_data = cfile;
+ spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
goto out;
} else {
- spin_unlock(&CIFS_I(inode)->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
- } else {
- spin_unlock(&CIFS_I(inode)->deferred_lock);
}
if (server->oplocks)
@@ -878,12 +874,8 @@ void smb2_deferred_work_close(struct work_struct *work)
struct cifsFileInfo, deferred.work);
spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- if (!cfile->deferred_scheduled) {
- spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- return;
- }
cifs_del_deferred_close(cfile);
- cfile->deferred_scheduled = false;
+ cfile->deferred_close_scheduled = false;
spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
@@ -900,19 +892,26 @@ int cifs_close(struct inode *inode, struct file *file)
file->private_data = NULL;
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+ cinode->lease_granted &&
dclose) {
if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
inode->i_ctime = inode->i_mtime = current_time(inode);
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
- if (cfile->deferred_scheduled) {
- mod_delayed_work(deferredclose_wq,
- &cfile->deferred, cifs_sb->ctx->acregmax);
+ if (cfile->deferred_close_scheduled &&
+ delayed_work_pending(&cfile->deferred)) {
+ /*
+ * If there is no pending work, mod_delayed_work queues new work.
+ * So, increase the ref count to avoid use-after-free.
+ */
+ if (!mod_delayed_work(deferredclose_wq,
+ &cfile->deferred, cifs_sb->ctx->acregmax))
+ cifsFileInfo_get(cfile);
} else {
/* Deferred close for files */
queue_delayed_work(deferredclose_wq,
&cfile->deferred, cifs_sb->ctx->acregmax);
- cfile->deferred_scheduled = true;
+ cfile->deferred_close_scheduled = true;
spin_unlock(&cinode->deferred_lock);
return 0;
}
@@ -2020,8 +2019,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
- if ((!open_file->invalidHandle) &&
- (!open_file->oplock_break_received)) {
+ if ((!open_file->invalidHandle)) {
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
@@ -4874,14 +4872,20 @@ oplock_break_ack:
}
/*
* When oplock break is received and there are no active
- * file handles but cached, then set the flag oplock_break_received.
+ * file handles but cached, then schedule deferred close immediately.
* So, new open will not use cached handle.
*/
spin_lock(&CIFS_I(inode)->deferred_lock);
is_deferred = cifs_is_deferred_close(cfile, &dclose);
- if (is_deferred && cfile->deferred_scheduled) {
- cfile->oplock_break_received = true;
- mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+ if (is_deferred &&
+ cfile->deferred_close_scheduled &&
+ delayed_work_pending(&cfile->deferred)) {
+ /*
+ * If there is no pending work, mod_delayed_work queues new work.
+ * So, increase the ref count to avoid use-after-free.
+ */
+ if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+ cifsFileInfo_get(cfile);
}
spin_unlock(&CIFS_I(inode)->deferred_lock);
_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 5d21cd905315..92d4ab029c91 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -1145,7 +1145,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
/* if iocharset not set then load_nls_default
* is used by caller
*/
- cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
+ cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
break;
case Opt_netbiosname:
memset(ctx->source_rfc1001_name, 0x20,
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 28ec8d7c521a..d67d281ab863 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -33,6 +33,7 @@
#include "cifsfs.h"
#include "cifs_ioctl.h"
#include "smb2proto.h"
+#include "smb2glob.h"
#include <linux/btrfs.h>
static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
@@ -214,48 +215,112 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
return 0;
}
-static int cifs_dump_full_key(struct cifs_tcon *tcon, unsigned long arg)
+static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
{
- struct smb3_full_key_debug_info pfull_key_inf;
- __u64 suid;
- struct list_head *tmp;
+ struct smb3_full_key_debug_info out;
struct cifs_ses *ses;
+ int rc = 0;
bool found = false;
+ u8 __user *end;
- if (!smb3_encryption_required(tcon))
- return -EOPNOTSUPP;
+ if (!smb3_encryption_required(tcon)) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* copy user input into our output buffer */
+ if (copy_from_user(&out, in, sizeof(out))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!out.session_id) {
+ /* if ses id is 0, use current user session */
+ ses = tcon->ses;
+ } else {
+ /* otherwise if a session id is given, look for it in all our sessions */
+ struct cifs_ses *ses_it = NULL;
+ struct TCP_Server_Info *server_it = NULL;
- ses = tcon->ses; /* default to user id for current user */
- if (get_user(suid, (__u64 __user *)arg))
- suid = 0;
- if (suid) {
- /* search to see if there is a session with a matching SMB UID */
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &tcon->ses->server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
- if (ses->Suid == suid) {
- found = true;
- break;
+ list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+ if (ses_it->Suid == out.session_id) {
+ ses = ses_it;
+ /*
+ * since we are using the session outside the crit
+ * section, we need to make sure it won't be released
+ * so increment its refcount
+ */
+ ses->ses_count++;
+ found = true;
+ goto search_end;
+ }
}
}
+search_end:
spin_unlock(&cifs_tcp_ses_lock);
- if (found == false)
- return -EINVAL;
- } /* else uses default user's SMB UID (ie current user) */
-
- pfull_key_inf.cipher_type = le16_to_cpu(ses->server->cipher_type);
- pfull_key_inf.Suid = ses->Suid;
- memcpy(pfull_key_inf.auth_key, ses->auth_key.response,
- 16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
- memcpy(pfull_key_inf.smb3decryptionkey, ses->smb3decryptionkey,
- 32 /* SMB3_ENC_DEC_KEY_SIZE */);
- memcpy(pfull_key_inf.smb3encryptionkey,
- ses->smb3encryptionkey, 32 /* SMB3_ENC_DEC_KEY_SIZE */);
- if (copy_to_user((void __user *)arg, &pfull_key_inf,
- sizeof(struct smb3_full_key_debug_info)))
- return -EFAULT;
+ if (!found) {
+ rc = -ENOENT;
+ goto out;
+ }
+ }
- return 0;
+ switch (ses->server->cipher_type) {
+ case SMB2_ENCRYPTION_AES128_CCM:
+ case SMB2_ENCRYPTION_AES128_GCM:
+ out.session_key_length = CIFS_SESS_KEY_SIZE;
+ out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE;
+ break;
+ case SMB2_ENCRYPTION_AES256_CCM:
+ case SMB2_ENCRYPTION_AES256_GCM:
+ out.session_key_length = CIFS_SESS_KEY_SIZE;
+ out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* check if user buffer is big enough to store all the keys */
+ if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length
+ + out.server_out_key_length) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ out.session_id = ses->Suid;
+ out.cipher_type = le16_to_cpu(ses->server->cipher_type);
+
+ /* overwrite user input with our output */
+ if (copy_to_user(in, &out, sizeof(out))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* append all the keys at the end of the user buffer */
+ end = in->data;
+ if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ end += out.session_key_length;
+
+ if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ end += out.server_in_key_length;
+
+ if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ if (found)
+ cifs_put_smb_ses(ses);
+ return rc;
}
long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
@@ -371,6 +436,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = -EOPNOTSUPP;
break;
case CIFS_DUMP_KEY:
+ /*
+ * Dump encryption keys. This is an old ioctl that only
+ * handles AES-128-{CCM,GCM}.
+ */
if (pSMBFile == NULL)
break;
if (!capable(CAP_SYS_ADMIN)) {
@@ -398,11 +467,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
else
rc = 0;
break;
- /*
- * Dump full key (32 bytes instead of 16 bytes) is
- * needed if GCM256 (stronger encryption) negotiated
- */
case CIFS_DUMP_FULL_KEY:
+ /*
+ * Dump encryption keys (handles any key sizes)
+ */
if (pSMBFile == NULL)
break;
if (!capable(CAP_SYS_ADMIN)) {
@@ -410,8 +478,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
break;
}
tcon = tlink_tcon(pSMBFile->tlink);
- rc = cifs_dump_full_key(tcon, arg);
-
+ rc = cifs_dump_full_key(tcon, (void __user *)arg);
break;
case CIFS_IOC_NOTIFY:
if (!S_ISDIR(inode->i_mode)) {
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 524dbdfb7184..7207a63819cb 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -672,6 +672,11 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
+/*
+ * Critical section which runs after acquiring deferred_lock.
+ * As there is no reference count on cifs_deferred_close, pdclose
+ * should not be used outside deferred_lock.
+ */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
@@ -688,6 +693,9 @@ cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **
return false;
}
+/*
+ * Critical section which runs after acquiring deferred_lock.
+ */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
@@ -707,6 +715,9 @@ cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *
list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}
+/*
+ * Critical section which runs after acquiring deferred_lock.
+ */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
@@ -738,15 +749,19 @@ void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
struct cifsFileInfo *cfile;
- struct cifsInodeInfo *cinode;
struct list_head *tmp;
spin_lock(&tcon->open_file_lock);
list_for_each(tmp, &tcon->openFileList) {
cfile = list_entry(tmp, struct cifsFileInfo, tlist);
- cinode = CIFS_I(d_inode(cfile->dentry));
- if (delayed_work_pending(&cfile->deferred))
- mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+ if (delayed_work_pending(&cfile->deferred)) {
+ /*
+ * If there is no pending work, mod_delayed_work queues new work.
+ * So, increase the ref count to avoid use-after-free.
+ */
+ if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+ cifsFileInfo_get(cfile);
+ }
}
spin_unlock(&tcon->open_file_lock);
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index dd0eb665b680..21ef51d338e0 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1861,6 +1861,8 @@ smb2_copychunk_range(const unsigned int xid,
cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
+ kfree(retbuf);
+ retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk,
@@ -3981,6 +3983,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache)
{
oplock &= 0xFF;
+ cinode->lease_granted = false;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
@@ -4007,6 +4010,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int new_oplock = 0;
oplock &= 0xFF;
+ cinode->lease_granted = true;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index a8bf43184773..c205f93e0a10 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -958,6 +958,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
/* Internal types */
server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
+ /*
+ * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context
+ * Set the cipher type manually.
+ */
+ if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+
security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
(struct smb2_sync_hdr *)rsp);
/*
@@ -3900,10 +3907,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
* Related requests use info from previous read request
* in chain.
*/
- shdr->SessionId = 0xFFFFFFFF;
+ shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
shdr->TreeId = 0xFFFFFFFF;
- req->PersistentFileId = 0xFFFFFFFF;
- req->VolatileFileId = 0xFFFFFFFF;
+ req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
+ req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index d6df908dccad..dafcb6ab050d 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -12,6 +12,11 @@
#include <linux/tracepoint.h>
+/*
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
+ */
+
/* For logging errors in read or write */
DECLARE_EVENT_CLASS(smb3_rw_err_class,
TP_PROTO(unsigned int xid,
@@ -529,16 +534,16 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
TP_ARGS(xid, func_name, rc),
TP_STRUCT__entry(
__field(unsigned int, xid)
- __field(const char *, func_name)
+ __string(func_name, func_name)
__field(int, rc)
),
TP_fast_assign(
__entry->xid = xid;
- __entry->func_name = func_name;
+ __assign_str(func_name, func_name);
__entry->rc = rc;
),
TP_printk("\t%s: xid=%u rc=%d",
- __entry->func_name, __entry->xid, __entry->rc)
+ __get_str(func_name), __entry->xid, __entry->rc)
)
#define DEFINE_SMB3_EXIT_ERR_EVENT(name) \
@@ -583,14 +588,14 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
TP_ARGS(xid, func_name),
TP_STRUCT__entry(
__field(unsigned int, xid)
- __field(const char *, func_name)
+ __string(func_name, func_name)
),
TP_fast_assign(
__entry->xid = xid;
- __entry->func_name = func_name;
+ __assign_str(func_name, func_name);
),
TP_printk("\t%s: xid=%u",
- __entry->func_name, __entry->xid)
+ __get_str(func_name), __entry->xid)
)
#define DEFINE_SMB3_ENTER_EXIT_EVENT(name) \
@@ -857,16 +862,16 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class,
TP_STRUCT__entry(
__field(__u64, currmid)
__field(__u64, conn_id)
- __field(char *, hostname)
+ __string(hostname, hostname)
),
TP_fast_assign(
__entry->currmid = currmid;
__entry->conn_id = conn_id;
- __entry->hostname = hostname;
+ __assign_str(hostname, hostname);
),
TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
__entry->conn_id,
- __entry->hostname,
+ __get_str(hostname),
__entry->currmid)
)
@@ -891,7 +896,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
TP_STRUCT__entry(
__field(__u64, currmid)
__field(__u64, conn_id)
- __field(char *, hostname)
+ __string(hostname, hostname)
__field(int, credits)
__field(int, credits_to_add)
__field(int, in_flight)
@@ -899,7 +904,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
TP_fast_assign(
__entry->currmid = currmid;
__entry->conn_id = conn_id;
- __entry->hostname = hostname;
+ __assign_str(hostname, hostname);
__entry->credits = credits;
__entry->credits_to_add = credits_to_add;
__entry->in_flight = in_flight;
@@ -907,7 +912,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
"credits=%d credit_change=%d in_flight=%d",
__entry->conn_id,
- __entry->hostname,
+ __get_str(hostname),
__entry->currmid,
__entry->credits,
__entry->credits_to_add,
diff --git a/fs/coredump.c b/fs/coredump.c
index 2868e3e171ae..c3d8fc14b993 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
* but then we need to teach dump_write() to restart and clear
* TIF_SIGPENDING.
*/
- return signal_pending(current);
+ return fatal_signal_pending(current) || freezing(current);
}
static void wait_for_dump_helpers(struct file *file)
diff --git a/fs/dax.c b/fs/dax.c
index 69216241392f..62352cbcf0f4 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
};
+/**
+ * enum dax_wake_mode: waitqueue wakeup behaviour
+ * @WAKE_ALL: wake all waiters in the waitqueue
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
+ */
+enum dax_wake_mode {
+ WAKE_ALL,
+ WAKE_NEXT,
+};
+
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
void *entry, struct exceptional_entry_key *key)
{
@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
* The important information it's conveying is whether the entry at
* this index used to be a PMD entry.
*/
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry,
+ enum dax_wake_mode mode)
{
struct exceptional_entry_key key;
wait_queue_head_t *wq;
@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
* must be in the waitqueue and the following check will see them.
*/
if (waitqueue_active(wq))
- __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+ __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
/*
@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
finish_wait(wq, &ewait.wait);
}
-static void put_unlocked_entry(struct xa_state *xas, void *entry)
+static void put_unlocked_entry(struct xa_state *xas, void *entry,
+ enum dax_wake_mode mode)
{
- /* If we were the only waiter woken, wake the next one */
if (entry && !dax_is_conflict(entry))
- dax_wake_entry(xas, entry, false);
+ dax_wake_entry(xas, entry, mode);
}
/*
@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
old = xas_store(xas, entry);
xas_unlock_irq(xas);
BUG_ON(!dax_is_locked(old));
- dax_wake_entry(xas, entry, false);
+ dax_wake_entry(xas, entry, WAKE_NEXT);
}
/*
@@ -524,7 +535,7 @@ retry:
dax_disassociate_entry(entry, mapping, false);
xas_store(xas, NULL); /* undo the PMD join */
- dax_wake_entry(xas, entry, true);
+ dax_wake_entry(xas, entry, WAKE_ALL);
mapping->nrpages -= PG_PMD_NR;
entry = NULL;
xas_set(xas, index);
@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
entry = get_unlocked_entry(&xas, 0);
if (entry)
page = dax_busy_page(entry);
- put_unlocked_entry(&xas, entry);
+ put_unlocked_entry(&xas, entry, WAKE_NEXT);
if (page)
break;
if (++scanned % XA_CHECK_SCHED)
@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
mapping->nrpages -= 1UL << dax_entry_order(entry);
ret = 1;
out:
- put_unlocked_entry(&xas, entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
xas_unlock_irq(&xas);
return ret;
}
@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
xas_lock_irq(xas);
xas_store(xas, entry);
xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
- dax_wake_entry(xas, entry, false);
+ dax_wake_entry(xas, entry, WAKE_NEXT);
trace_dax_writeback_one(mapping->host, index, count);
return ret;
put_unlocked:
- put_unlocked_entry(xas, entry);
+ put_unlocked_entry(xas, entry, WAKE_NEXT);
return ret;
}
@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
- put_unlocked_entry(&xas, entry);
+ put_unlocked_entry(&xas, entry, WAKE_NEXT);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
VM_FAULT_NOPAGE);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index e813acfaa6e8..ba7c01cd9a5d 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -893,7 +893,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
copy[copy_len] = '\n';
- ret = simple_read_from_buffer(user_buf, count, ppos, copy, copy_len);
+ ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
kfree(copy);
return ret;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 1d252164d97b..8129a430d789 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -45,10 +45,13 @@ static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
static int debugfs_setattr(struct user_namespace *mnt_userns,
struct dentry *dentry, struct iattr *ia)
{
- int ret = security_locked_down(LOCKDOWN_DEBUGFS);
+ int ret;
- if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
- return ret;
+ if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
+ ret = security_locked_down(LOCKDOWN_DEBUGFS);
+ if (ret)
+ return ret;
+ }
return simple_setattr(&init_user_ns, dentry, ia);
}
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 345f8061e3b4..e3f5d7f3c8a0 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -296,10 +296,6 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct extent_crypt_result ecr;
int rc = 0;
- if (!crypt_stat || !crypt_stat->tfm
- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
- return -EINVAL;
-
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index e62d813756f2..efaf32596b97 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -450,14 +450,31 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
lcn = m->lcn + 1;
if (m->compressedlcs)
goto out;
- if (lcn == initial_lcn)
- goto err_bonus_cblkcnt;
err = z_erofs_load_cluster_from_disk(m, lcn);
if (err)
return err;
+ /*
+ * If the 1st NONHEAD lcluster has already been handled initially w/o
+ * valid compressedlcs, which means at least it mustn't be CBLKCNT, or
+ * an internal implementation error is detected.
+ *
+ * The following code can also handle it properly anyway, but let's
+ * BUG_ON in the debugging mode only for developers to notice that.
+ */
+ DBG_BUGON(lcn == initial_lcn &&
+ m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
+
switch (m->type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ /*
+ * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
+ * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
+ */
+ m->compressedlcs = 1;
+ break;
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
if (m->delta[0] != 1)
goto err_bonus_cblkcnt;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 77c84d6f1af6..cbf37b2cf871 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
ext4_ext_mark_unwritten(ex2);
err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+ if (err != -ENOSPC && err != -EDQUOT)
+ goto out;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
err = ext4_ext_zeroout(inode, ex2);
@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
ext4_ext_pblock(&orig_ex));
}
- if (err)
- goto fix_extent_len;
- /* update the extent length and mark as initialized */
- ex->ee_len = cpu_to_le16(ee_len);
- ext4_ext_try_to_merge(handle, inode, path, ex);
- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
- if (err)
- goto fix_extent_len;
-
- /* update extent status tree */
- err = ext4_zeroout_es(inode, &zero_ex);
-
- goto out;
- } else if (err)
- goto fix_extent_len;
-
-out:
- ext4_ext_show_leaf(inode, path);
- return err;
+ if (!err) {
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+ if (!err)
+ /* update extent status tree */
+ err = ext4_zeroout_es(inode, &zero_ex);
+ /* If we failed at this point, we don't know in which
+ * state the extent tree exactly is so don't try to fix
+ * length of the original extent as it may do even more
+ * damage.
+ */
+ goto out;
+ }
+ }
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
@@ -3260,6 +3260,9 @@ fix_extent_len:
*/
ext4_ext_dirty(handle, inode, path + path->p_depth);
return err;
+out:
+ ext4_ext_show_leaf(inode, path);
+ return err;
}
/*
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index f98ca4f37ef6..e8195229c252 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -1288,28 +1288,29 @@ struct dentry_info_args {
};
static inline void tl_to_darg(struct dentry_info_args *darg,
- struct ext4_fc_tl *tl)
+ struct ext4_fc_tl *tl, u8 *val)
{
- struct ext4_fc_dentry_info *fcd;
+ struct ext4_fc_dentry_info fcd;
- fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
+ memcpy(&fcd, val, sizeof(fcd));
- darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
- darg->ino = le32_to_cpu(fcd->fc_ino);
- darg->dname = fcd->fc_dname;
- darg->dname_len = ext4_fc_tag_len(tl) -
- sizeof(struct ext4_fc_dentry_info);
+ darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
+ darg->ino = le32_to_cpu(fcd.fc_ino);
+ darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
+ darg->dname_len = le16_to_cpu(tl->fc_len) -
+ sizeof(struct ext4_fc_dentry_info);
}
/* Unlink replay function */
-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
struct inode *inode, *old_parent;
struct qstr entry;
struct dentry_info_args darg;
int ret = 0;
- tl_to_darg(&darg, tl);
+ tl_to_darg(&darg, tl, val);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
darg.parent_ino, darg.dname_len);
@@ -1399,13 +1400,14 @@ out:
}
/* Link replay function */
-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
struct inode *inode;
struct dentry_info_args darg;
int ret = 0;
- tl_to_darg(&darg, tl);
+ tl_to_darg(&darg, tl, val);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
darg.parent_ino, darg.dname_len);
@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
/*
* Inode replay function
*/
-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
- struct ext4_fc_inode *fc_inode;
+ struct ext4_fc_inode fc_inode;
struct ext4_inode *raw_inode;
struct ext4_inode *raw_fc_inode;
struct inode *inode = NULL;
@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
struct ext4_extent_header *eh;
- fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
+ memcpy(&fc_inode, val, sizeof(fc_inode));
- ino = le32_to_cpu(fc_inode->fc_ino);
+ ino = le32_to_cpu(fc_inode.fc_ino);
trace_ext4_fc_replay(sb, tag, ino, 0, 0);
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
ext4_fc_record_modified_inode(sb, ino);
- raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
+ raw_fc_inode = (struct ext4_inode *)
+ (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
if (ret)
goto out;
- inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
+ inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
raw_inode = ext4_raw_inode(&iloc);
memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
@@ -1547,14 +1551,15 @@ out:
* inode for which we are trying to create a dentry here, should already have
* been replayed before we start here.
*/
-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
int ret = 0;
struct inode *inode = NULL;
struct inode *dir = NULL;
struct dentry_info_args darg;
- tl_to_darg(&darg, tl);
+ tl_to_darg(&darg, tl, val);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
darg.parent_ino, darg.dname_len);
@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
/* Replay add range tag */
static int ext4_fc_replay_add_range(struct super_block *sb,
- struct ext4_fc_tl *tl)
+ struct ext4_fc_tl *tl, u8 *val)
{
- struct ext4_fc_add_range *fc_add_ex;
+ struct ext4_fc_add_range fc_add_ex;
struct ext4_extent newex, *ex;
struct inode *inode;
ext4_lblk_t start, cur;
@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
struct ext4_ext_path *path = NULL;
int ret;
- fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
- ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
+ memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
+ ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
- le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
+ le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
ext4_ext_get_actual_len(ex));
- inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
- EXT4_IGET_NORMAL);
+ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
jbd_debug(1, "Inode not found.");
return 0;
@@ -1762,32 +1766,33 @@ next:
/* Replay DEL_RANGE tag */
static int
-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+ u8 *val)
{
struct inode *inode;
- struct ext4_fc_del_range *lrange;
+ struct ext4_fc_del_range lrange;
struct ext4_map_blocks map;
ext4_lblk_t cur, remaining;
int ret;
- lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
- cur = le32_to_cpu(lrange->fc_lblk);
- remaining = le32_to_cpu(lrange->fc_len);
+ memcpy(&lrange, val, sizeof(lrange));
+ cur = le32_to_cpu(lrange.fc_lblk);
+ remaining = le32_to_cpu(lrange.fc_len);
trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
- le32_to_cpu(lrange->fc_ino), cur, remaining);
+ le32_to_cpu(lrange.fc_ino), cur, remaining);
- inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
+ inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
+ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
return 0;
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
- inode->i_ino, le32_to_cpu(lrange->fc_lblk),
- le32_to_cpu(lrange->fc_len));
+ inode->i_ino, le32_to_cpu(lrange.fc_lblk),
+ le32_to_cpu(lrange.fc_len));
while (remaining > 0) {
map.m_lblk = cur;
map.m_len = remaining;
@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
}
ret = ext4_punch_hole(inode,
- le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
- le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits);
+ le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
+ le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits);
if (ret)
jbd_debug(1, "ext4_punch_hole returned %d", ret);
ext4_ext_replay_shrink_inode(inode,
@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_fc_replay_state *state;
int ret = JBD2_FC_REPLAY_CONTINUE;
- struct ext4_fc_add_range *ext;
- struct ext4_fc_tl *tl;
- struct ext4_fc_tail *tail;
- __u8 *start, *end;
- struct ext4_fc_head *head;
+ struct ext4_fc_add_range ext;
+ struct ext4_fc_tl tl;
+ struct ext4_fc_tail tail;
+ __u8 *start, *end, *cur, *val;
+ struct ext4_fc_head head;
struct ext4_extent *ex;
state = &sbi->s_fc_replay_state;
@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
}
state->fc_replay_expected_off++;
- fc_for_each_tl(start, end, tl) {
+ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+ memcpy(&tl, cur, sizeof(tl));
+ val = cur + sizeof(tl);
jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
- tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
- switch (le16_to_cpu(tl->fc_tag)) {
+ tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
+ switch (le16_to_cpu(tl.fc_tag)) {
case EXT4_FC_TAG_ADD_RANGE:
- ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
- ex = (struct ext4_extent *)&ext->fc_ex;
+ memcpy(&ext, val, sizeof(ext));
+ ex = (struct ext4_extent *)&ext.fc_ex;
ret = ext4_fc_record_regions(sb,
- le32_to_cpu(ext->fc_ino),
+ le32_to_cpu(ext.fc_ino),
le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
ext4_ext_get_actual_len(ex));
if (ret < 0)
@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
case EXT4_FC_TAG_INODE:
case EXT4_FC_TAG_PAD:
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
- sizeof(*tl) + ext4_fc_tag_len(tl));
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ sizeof(tl) + le16_to_cpu(tl.fc_len));
break;
case EXT4_FC_TAG_TAIL:
state->fc_cur_tag++;
- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
- sizeof(*tl) +
+ memcpy(&tail, val, sizeof(tail));
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ sizeof(tl) +
offsetof(struct ext4_fc_tail,
fc_crc));
- if (le32_to_cpu(tail->fc_tid) == expected_tid &&
- le32_to_cpu(tail->fc_crc) == state->fc_crc) {
+ if (le32_to_cpu(tail.fc_tid) == expected_tid &&
+ le32_to_cpu(tail.fc_crc) == state->fc_crc) {
state->fc_replay_num_tags = state->fc_cur_tag;
state->fc_regions_valid =
state->fc_regions_used;
@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
state->fc_crc = 0;
break;
case EXT4_FC_TAG_HEAD:
- head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
- if (le32_to_cpu(head->fc_features) &
+ memcpy(&head, val, sizeof(head));
+ if (le32_to_cpu(head.fc_features) &
~EXT4_FC_SUPPORTED_FEATURES) {
ret = -EOPNOTSUPP;
break;
}
- if (le32_to_cpu(head->fc_tid) != expected_tid) {
+ if (le32_to_cpu(head.fc_tid) != expected_tid) {
ret = JBD2_FC_REPLAY_STOP;
break;
}
state->fc_cur_tag++;
- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
- sizeof(*tl) + ext4_fc_tag_len(tl));
+ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+ sizeof(tl) + le16_to_cpu(tl.fc_len));
break;
default:
ret = state->fc_replay_num_tags ?
@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_fc_tl *tl;
- __u8 *start, *end;
+ struct ext4_fc_tl tl;
+ __u8 *start, *end, *cur, *val;
int ret = JBD2_FC_REPLAY_CONTINUE;
struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
- struct ext4_fc_tail *tail;
+ struct ext4_fc_tail tail;
if (pass == PASS_SCAN) {
state->fc_current_pass = PASS_SCAN;
@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
start = (u8 *)bh->b_data;
end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
- fc_for_each_tl(start, end, tl) {
+ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+ memcpy(&tl, cur, sizeof(tl));
+ val = cur + sizeof(tl);
+
if (state->fc_replay_num_tags == 0) {
ret = JBD2_FC_REPLAY_STOP;
ext4_fc_set_bitmaps_and_counters(sb);
break;
}
jbd_debug(3, "Replay phase, tag:%s\n",
- tag2str(le16_to_cpu(tl->fc_tag)));
+ tag2str(le16_to_cpu(tl.fc_tag)));
state->fc_replay_num_tags--;
- switch (le16_to_cpu(tl->fc_tag)) {
+ switch (le16_to_cpu(tl.fc_tag)) {
case EXT4_FC_TAG_LINK:
- ret = ext4_fc_replay_link(sb, tl);
+ ret = ext4_fc_replay_link(sb, &tl, val);
break;
case EXT4_FC_TAG_UNLINK:
- ret = ext4_fc_replay_unlink(sb, tl);
+ ret = ext4_fc_replay_unlink(sb, &tl, val);
break;
case EXT4_FC_TAG_ADD_RANGE:
- ret = ext4_fc_replay_add_range(sb, tl);
+ ret = ext4_fc_replay_add_range(sb, &tl, val);
break;
case EXT4_FC_TAG_CREAT:
- ret = ext4_fc_replay_create(sb, tl);
+ ret = ext4_fc_replay_create(sb, &tl, val);
break;
case EXT4_FC_TAG_DEL_RANGE:
- ret = ext4_fc_replay_del_range(sb, tl);
+ ret = ext4_fc_replay_del_range(sb, &tl, val);
break;
case EXT4_FC_TAG_INODE:
- ret = ext4_fc_replay_inode(sb, tl);
+ ret = ext4_fc_replay_inode(sb, &tl, val);
break;
case EXT4_FC_TAG_PAD:
trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
- ext4_fc_tag_len(tl), 0);
+ le16_to_cpu(tl.fc_len), 0);
break;
case EXT4_FC_TAG_TAIL:
trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
- ext4_fc_tag_len(tl), 0);
- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
- WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
+ le16_to_cpu(tl.fc_len), 0);
+ memcpy(&tail, val, sizeof(tail));
+ WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
break;
case EXT4_FC_TAG_HEAD:
break;
default:
- trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
- ext4_fc_tag_len(tl), 0);
+ trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
+ le16_to_cpu(tl.fc_len), 0);
ret = -ECANCELED;
break;
}
diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h
index b77f70f55a62..937c381b4c85 100644
--- a/fs/ext4/fast_commit.h
+++ b/fs/ext4/fast_commit.h
@@ -153,13 +153,6 @@ struct ext4_fc_replay_state {
#define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
#endif
-#define fc_for_each_tl(__start, __end, __tl) \
- for (tl = (struct ext4_fc_tl *)(__start); \
- (__u8 *)tl < (__u8 *)(__end); \
- tl = (struct ext4_fc_tl *)((__u8 *)tl + \
- sizeof(struct ext4_fc_tl) + \
- + le16_to_cpu(tl->fc_len)))
-
static inline const char *tag2str(__u16 tag)
{
switch (tag) {
@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag)
}
}
-/* Get length of a particular tlv */
-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
-{
- return le16_to_cpu(tl->fc_len);
-}
-
-/* Get a pointer to "value" of a tlv */
-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
-{
- return (__u8 *)tl + sizeof(*tl);
-}
-
#endif /* __FAST_COMMIT_H__ */
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 81a17a3cd80e..9bab7fd4ccd5 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (is_directory) {
count = ext4_used_dirs_count(sb, gdp) - 1;
ext4_used_dirs_set(sb, gdp, count);
- percpu_counter_dec(&sbi->s_dirs_counter);
+ if (percpu_counter_initialized(&sbi->s_dirs_counter))
+ percpu_counter_dec(&sbi->s_dirs_counter);
}
ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
EXT4_INODES_PER_GROUP(sb) / 8);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
- percpu_counter_inc(&sbi->s_freeinodes_counter);
+ if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
+ percpu_counter_inc(&sbi->s_freeinodes_counter);
if (sbi->s_log_groups_per_flex) {
struct flex_groups *fg;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 3239e6669e84..c2c22c2baac0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3217,7 +3217,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
*/
if (sbi->s_es->s_log_groups_per_flex >= 32) {
ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
- goto err_freesgi;
+ goto err_freebuddy;
}
sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index afb9d05a99ba..a4af26d4459a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1376,7 +1376,8 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
struct dx_hash_info *hinfo = &name->hinfo;
int len;
- if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding) {
+ if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+ (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
cf_name->name = NULL;
return 0;
}
@@ -1427,7 +1428,8 @@ static bool ext4_match(struct inode *parent,
#endif
#ifdef CONFIG_UNICODE
- if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent)) {
+ if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+ (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
if (fname->cf_name.name) {
struct qstr cf = {.name = fname->cf_name.name,
.len = fname->cf_name.len};
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7dc94f3e18e6..d29f6aa7d96e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4462,14 +4462,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (sb->s_blocksize != blocksize) {
+ /*
+ * bh must be released before kill_bdev(), otherwise
+ * neither it nor its page will be freed. kill_bdev()
+ * is called by sb_set_blocksize().
+ */
+ brelse(bh);
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
ext4_msg(sb, KERN_ERR, "bad block size %d",
blocksize);
+ bh = NULL;
goto failed_mount;
}
- brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
@@ -5202,8 +5208,9 @@ failed_mount:
kfree(get_qf_name(sb, sbi, i));
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
- ext4_blkdev_remove(sbi);
+ /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
brelse(bh);
+ ext4_blkdev_remove(sbi);
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 6f825dedc3d4..55fcab60a59a 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -315,7 +315,9 @@ EXT4_ATTR_FEATURE(verity);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
EXT4_ATTR_FEATURE(fast_commit);
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
EXT4_ATTR_FEATURE(encrypted_casefold);
+#endif
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
@@ -333,7 +335,9 @@ static struct attribute *ext4_feat_attrs[] = {
#endif
ATTR_LIST(metadata_csum_seed),
ATTR_LIST(fast_commit),
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
ATTR_LIST(encrypted_casefold),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(ext4_feat);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 53b13787eb2c..925a5ca3744a 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -117,19 +117,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
f2fs_drop_rpages(cc, len, true);
}
-static void f2fs_put_rpages_mapping(struct address_space *mapping,
- pgoff_t start, int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- struct page *page = find_get_page(mapping, start + i);
-
- put_page(page);
- put_page(page);
- }
-}
-
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
struct writeback_control *wbc, bool redirty, int unlock)
{
@@ -158,13 +145,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
return cc->rpages ? 0 : -ENOMEM;
}
-void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
page_array_free(cc->inode, cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
- cc->cluster_idx = NULL_CLUSTER;
+ if (!reuse)
+ cc->cluster_idx = NULL_CLUSTER;
}
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
@@ -1036,7 +1024,7 @@ retry:
}
if (PageUptodate(page))
- unlock_page(page);
+ f2fs_put_page(page, 1);
else
f2fs_compress_ctx_add_page(cc, page);
}
@@ -1046,33 +1034,35 @@ retry:
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
&last_block_in_bio, false, true);
- f2fs_destroy_compress_ctx(cc);
+ f2fs_put_rpages(cc);
+ f2fs_destroy_compress_ctx(cc, true);
if (ret)
- goto release_pages;
+ goto out;
if (bio)
f2fs_submit_bio(sbi, bio, DATA);
ret = f2fs_init_compress_ctx(cc);
if (ret)
- goto release_pages;
+ goto out;
}
for (i = 0; i < cc->cluster_size; i++) {
f2fs_bug_on(sbi, cc->rpages[i]);
page = find_lock_page(mapping, start_idx + i);
- f2fs_bug_on(sbi, !page);
+ if (!page) {
+ /* page can be truncated */
+ goto release_and_retry;
+ }
f2fs_wait_on_page_writeback(page, DATA, true, true);
-
f2fs_compress_ctx_add_page(cc, page);
- f2fs_put_page(page, 0);
if (!PageUptodate(page)) {
+release_and_retry:
+ f2fs_put_rpages(cc);
f2fs_unlock_rpages(cc, i + 1);
- f2fs_put_rpages_mapping(mapping, start_idx,
- cc->cluster_size);
- f2fs_destroy_compress_ctx(cc);
+ f2fs_destroy_compress_ctx(cc, true);
goto retry;
}
}
@@ -1103,10 +1093,10 @@ retry:
}
unlock_pages:
+ f2fs_put_rpages(cc);
f2fs_unlock_rpages(cc, i);
-release_pages:
- f2fs_put_rpages_mapping(mapping, start_idx, i);
- f2fs_destroy_compress_ctx(cc);
+ f2fs_destroy_compress_ctx(cc, true);
+out:
return ret;
}
@@ -1141,7 +1131,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
set_cluster_dirty(&cc);
f2fs_put_rpages_wbc(&cc, NULL, false, 1);
- f2fs_destroy_compress_ctx(&cc);
+ f2fs_destroy_compress_ctx(&cc, false);
return first_index;
}
@@ -1361,7 +1351,7 @@ unlock_continue:
f2fs_put_rpages(cc);
page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
- f2fs_destroy_compress_ctx(cc);
+ f2fs_destroy_compress_ctx(cc, false);
return 0;
out_destroy_crypt:
@@ -1372,7 +1362,8 @@ out_destroy_crypt:
for (i = 0; i < cc->nr_cpages; i++) {
if (!cc->cpages[i])
continue;
- f2fs_put_page(cc->cpages[i], 1);
+ f2fs_compress_free_page(cc->cpages[i]);
+ cc->cpages[i] = NULL;
}
out_put_cic:
kmem_cache_free(cic_entry_slab, cic);
@@ -1522,7 +1513,7 @@ write:
err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
- f2fs_destroy_compress_ctx(cc);
+ f2fs_destroy_compress_ctx(cc, false);
return err;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 96f1a354f89f..009a09fb9d88 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2287,7 +2287,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
max_nr_pages,
&last_block_in_bio,
rac != NULL, false);
- f2fs_destroy_compress_ctx(&cc);
+ f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
}
@@ -2332,7 +2332,7 @@ next_page:
max_nr_pages,
&last_block_in_bio,
rac != NULL, false);
- f2fs_destroy_compress_ctx(&cc);
+ f2fs_destroy_compress_ctx(&cc, false);
}
}
#endif
@@ -3033,7 +3033,7 @@ next:
}
}
if (f2fs_compressed_file(inode))
- f2fs_destroy_compress_ctx(&cc);
+ f2fs_destroy_compress_ctx(&cc, false);
#endif
if (retry) {
index = 0;
@@ -3801,6 +3801,7 @@ static int f2fs_is_file_aligned(struct inode *inode)
block_t pblock;
unsigned long nr_pblocks;
unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int not_aligned = 0;
int ret = 0;
cur_lblock = 0;
@@ -3833,13 +3834,20 @@ static int f2fs_is_file_aligned(struct inode *inode)
if ((pblock - main_blkaddr) & (blocks_per_sec - 1) ||
nr_pblocks & (blocks_per_sec - 1)) {
- f2fs_err(sbi, "Swapfile does not align to section");
- ret = -EINVAL;
- goto out;
+ if (f2fs_is_pinned_file(inode)) {
+ f2fs_err(sbi, "Swapfile does not align to section");
+ ret = -EINVAL;
+ goto out;
+ }
+ not_aligned++;
}
cur_lblock += nr_pblocks;
}
+ if (not_aligned)
+ f2fs_warn(sbi, "Swapfile (%u) is not aligned to section:\n"
+ "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
+ not_aligned);
out:
return ret;
}
@@ -3858,6 +3866,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
int nr_extents = 0;
unsigned long nr_pblocks;
unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int not_aligned = 0;
int ret = 0;
/*
@@ -3887,7 +3896,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
/* hole */
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
f2fs_err(sbi, "Swapfile has holes\n");
- ret = -ENOENT;
+ ret = -EINVAL;
goto out;
}
@@ -3896,9 +3905,12 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
if ((pblock - SM_I(sbi)->main_blkaddr) & (blocks_per_sec - 1) ||
nr_pblocks & (blocks_per_sec - 1)) {
- f2fs_err(sbi, "Swapfile does not align to section");
- ret = -EINVAL;
- goto out;
+ if (f2fs_is_pinned_file(inode)) {
+ f2fs_err(sbi, "Swapfile does not align to section");
+ ret = -EINVAL;
+ goto out;
+ }
+ not_aligned++;
}
if (cur_lblock + nr_pblocks >= sis->max)
@@ -3927,6 +3939,11 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
sis->max = cur_lblock;
sis->pages = cur_lblock - 1;
sis->highest_bit = cur_lblock - 1;
+
+ if (not_aligned)
+ f2fs_warn(sbi, "Swapfile (%u) is not aligned to section:\n"
+ "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
+ not_aligned);
out:
return ret;
}
@@ -4035,7 +4052,7 @@ out:
return ret;
bad_bmap:
f2fs_err(sbi, "Swapfile has holes\n");
- return -ENOENT;
+ return -EINVAL;
}
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
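The new warning above points at the recommended way to produce a section-aligned swapfile: create the file, pin it with F2FS_IOC_SET_PIN_FILE, then preallocate it with fallocate() before running mkswap. A minimal userspace sketch of that sequence follows (not part of this patch; the ioctl definition is copied locally as an assumption in case the f2fs uapi header is not installed, and the path and size are placeholders):

/*
 * Sketch: create a pinned, preallocated file suitable for mkswap/swapon
 * on f2fs, following the sequence named in the warning above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

#ifndef F2FS_IOC_SET_PIN_FILE
#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_SET_PIN_FILE	_IOW(F2FS_IOCTL_MAGIC, 13, __u32)
#endif

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/mnt/f2fs/swapfile";
	long long bytes = 1LL << 30;			/* 1 GiB */
	__u32 pin = 1;
	int fd;

	fd = open(path, O_CREAT | O_RDWR, 0600);	/* 1) creat() */
	if (fd < 0) { perror("open"); return 1; }

	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin)) {	/* 2) pin the file */
		perror("F2FS_IOC_SET_PIN_FILE");
		return 1;
	}

	if (fallocate(fd, 0, 0, bytes)) {		/* 3) fallocate() */
		perror("fallocate");
		return 1;
	}

	close(fd);
	return 0;					/* then mkswap + swapon */
}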
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 044878866ca3..c83d90125ebd 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3956,7 +3956,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
-void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 44a4650aea7b..ceb575f99048 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1817,7 +1817,8 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
struct f2fs_inode_info *fi = F2FS_I(inode);
u32 masked_flags = fi->i_flags & mask;
- f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
+ /* mask can be shrunk by flags_valid selector */
+ iflags &= mask;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index c605415840b5..51dc79fad4fe 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -3574,12 +3574,12 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
return err;
drop_bio:
- if (fio->bio) {
+ if (fio->bio && *(fio->bio)) {
struct bio *bio = *(fio->bio);
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- fio->bio = NULL;
+ *(fio->bio) = NULL;
}
return err;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index a0b542d84cd9..493a83e3f590 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -911,8 +911,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
current->backing_dev_info = inode_to_bdi(inode);
buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
current->backing_dev_info = NULL;
- if (unlikely(buffered <= 0))
+ if (unlikely(buffered <= 0)) {
+ if (!ret)
+ ret = buffered;
goto out_unlock;
+ }
/*
* We need to ensure that the page cache pages are written to
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ea7fc5c641c7..d9cb261f55b0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -582,6 +582,16 @@ out_locked:
spin_unlock(&gl->gl_lockref.lock);
}
+static bool is_system_glock(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+ if (gl == m_ip->i_gl)
+ return true;
+ return false;
+}
+
/**
* do_xmote - Calls the DLM to change the state of a lock
* @gl: The lock state
@@ -671,17 +681,25 @@ skip_inval:
* to see sd_log_error and withdraw, and in the meantime, requeue the
* work for later.
*
+ * We make a special exception for some system glocks, such as the
+ * system statfs inode glock, which needs to be granted before the
+ * gfs2_quotad daemon can exit, and that exit needs to finish before
+ * we can unmount the withdrawn file system.
+ *
* However, if we're just unlocking the lock (say, for unmount, when
* gfs2_gl_hash_clear calls clear_glock) and recovery is complete
* then it's okay to tell dlm to unlock it.
*/
if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
gfs2_withdraw_delayed(sdp);
- if (glock_blocked_by_withdraw(gl)) {
- if (target != LM_ST_UNLOCKED ||
- test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
+ if (glock_blocked_by_withdraw(gl) &&
+ (target != LM_ST_UNLOCKED ||
+ test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
+ if (!is_system_glock(gl)) {
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
goto out;
+ } else {
+ clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
@@ -1466,9 +1484,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
glock_blocked_by_withdraw(gl) &&
gh->gh_gl != sdp->sd_jinode_gl) {
sdp->sd_glock_dqs_held++;
+ spin_unlock(&gl->gl_lockref.lock);
might_sleep();
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
TASK_UNINTERRUPTIBLE);
+ spin_lock(&gl->gl_lockref.lock);
}
if (gh->gh_flags & GL_NOCACHE)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1775,6 +1795,7 @@ __acquires(&lru_lock)
while(!list_empty(list)) {
gl = list_first_entry(list, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
+ clear_bit(GLF_LRU, &gl->gl_flags);
if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
list_add(&gl->gl_lru, &lru_list);
@@ -1820,7 +1841,6 @@ static long gfs2_scan_glock_lru(int nr)
if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
- clear_bit(GLF_LRU, &gl->gl_flags);
freed++;
continue;
}
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 454095e9fedf..54d3fbeb3002 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -396,7 +396,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
struct timespec64 atime;
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
- bool is_new = ip->i_inode.i_flags & I_NEW;
+ bool is_new = ip->i_inode.i_state & I_NEW;
if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
goto corrupt;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 97d54e581a7b..42c15cfc0821 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -926,10 +926,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
}
/**
- * ail_drain - drain the ail lists after a withdraw
+ * gfs2_ail_drain - drain the ail lists after a withdraw
* @sdp: Pointer to GFS2 superblock
*/
-static void ail_drain(struct gfs2_sbd *sdp)
+void gfs2_ail_drain(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr;
@@ -956,6 +956,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
list_del(&tr->tr_list);
gfs2_trans_free(sdp, tr);
}
+ gfs2_drain_revokes(sdp);
spin_unlock(&sdp->sd_ail_lock);
}
@@ -1162,7 +1163,6 @@ out_withdraw:
if (tr && list_empty(&tr->tr_list))
list_add(&tr->tr_list, &sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
- ail_drain(sdp); /* frees all transactions */
tr = NULL;
goto out_end;
}
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index eea58015710e..fc905c2af53c 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -93,5 +93,6 @@ extern int gfs2_logd(void *data);
extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 221e7118cc3b..8ee05d25dfa6 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -885,7 +885,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_write_page(sdp, page);
}
-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
struct list_head *head = &sdp->sd_log_revokes;
struct gfs2_bufdata *bd;
@@ -900,6 +900,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
}
}
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+ gfs2_drain_revokes(sdp);
+}
+
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, int pass)
{
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 31b6dd0d2e5d..f707601597dc 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, bool keep_cache);
+extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
return sdp->sd_ldptrs;
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 3e08027a6c81..f4325b44956d 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -131,6 +131,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
return;
+ gfs2_ail_drain(sdp); /* frees all transactions */
inode = sdp->sd_jdesc->jd_inode;
ip = GFS2_I(inode);
i_gl = ip->i_gl;
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index a930ddd15681..7054a542689f 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
if (res)
break;
- hfs_brec_remove(&fd);
- mutex_unlock(&fd.tree->tree_lock);
start = hip->cached_start;
+ if (blk_cnt <= start)
+ hfs_brec_remove(&fd);
+ mutex_unlock(&fd.tree->tree_lock);
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->cached_extents);
+ mutex_lock(&fd.tree->tree_lock);
if (blk_cnt > start) {
hip->extent_state |= HFSPLUS_EXT_DIRTY;
break;
@@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
alloc_cnt = start;
hip->cached_start = hip->cached_blocks = 0;
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
- mutex_lock(&fd.tree->tree_lock);
}
hfs_find_exit(&fd);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a2a42335e8fd..30dee68458c7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
+ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
@@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
vma->vm_ops = &hugetlb_vm_ops;
+ ret = seal_check_future_write(info->seals, vma);
+ if (ret)
+ return ret;
+
/*
* page based offset in vm_pgoff could be sufficiently large to
* overflow a loff_t when converted to byte offset. This can
@@ -524,7 +529,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
* the subpool and global reserve usage count can need
* to be adjusted.
*/
- VM_BUG_ON(PagePrivate(page));
+ VM_BUG_ON(HPageRestoreReserve(page));
remove_huge_page(page);
freed++;
if (!truncate_op) {
@@ -730,6 +735,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
__SetPageUptodate(page);
error = huge_add_to_page_cache(page, mapping, index);
if (unlikely(error)) {
+ restore_reserve_on_error(h, &pseudo_vma, addr, page);
put_page(page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out;
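The hugetlbfs_file_mmap() hunk above hooks up seal_check_future_write(), so an F_SEAL_FUTURE_WRITE seal on a hugetlb memfd is now also honoured at mmap() time. A rough sketch of the user-visible effect (assumes reserved huge pages; the fallback F_SEAL_FUTURE_WRITE define mirrors the uapi value and is only for older libc headers):

/*
 * Sketch: seal a hugetlb memfd against future writes; with the mmap hook
 * above, a new writable shared mapping is refused while read-only
 * mappings still work. Requires reserved huge pages (vm.nr_hugepages).
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE	0x0010	/* assumed uapi value */
#endif

int main(void)
{
	size_t len = 2UL << 20;		/* one 2 MiB huge page */
	int fd = memfd_create("sealed", MFD_HUGETLB | MFD_ALLOW_SEALING);

	if (fd < 0) { perror("memfd_create"); return 1; }
	if (ftruncate(fd, len)) { perror("ftruncate"); return 1; }
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE)) {
		perror("F_ADD_SEALS");
		return 1;
	}

	/* Writable shared mappings must now fail ... */
	if (mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) ==
	    MAP_FAILED)
		printf("rw mmap refused: %s\n", strerror(errno));

	/* ... while read-only shared mappings keep working. */
	if (mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0) != MAP_FAILED)
		printf("ro mmap ok\n");

	return 0;
}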
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5361a9b4b47b..b3e8624a37d0 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -979,13 +979,16 @@ static bool io_task_work_match(struct callback_head *cb, void *data)
return cwd->wqe->wq == data;
}
+void io_wq_exit_start(struct io_wq *wq)
+{
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+}
+
static void io_wq_exit_workers(struct io_wq *wq)
{
struct callback_head *cb;
int node;
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
-
if (!wq->task)
return;
@@ -1003,13 +1006,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
struct io_wqe *wqe = wq->wqes[node];
io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
- spin_lock_irq(&wq->hash->wait.lock);
- list_del_init(&wq->wqes[node]->wait.entry);
- spin_unlock_irq(&wq->hash->wait.lock);
}
rcu_read_unlock();
io_worker_ref_put(wq);
wait_for_completion(&wq->worker_done);
+
+ for_each_node(node) {
+ spin_lock_irq(&wq->hash->wait.lock);
+ list_del_init(&wq->wqes[node]->wait.entry);
+ spin_unlock_irq(&wq->hash->wait.lock);
+ }
put_task_struct(wq->task);
wq->task = NULL;
}
@@ -1020,8 +1026,6 @@ static void io_wq_destroy(struct io_wq *wq)
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- io_wq_exit_workers(wq);
-
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
struct io_cb_cancel_data match = {
@@ -1036,16 +1040,13 @@ static void io_wq_destroy(struct io_wq *wq)
kfree(wq);
}
-void io_wq_put(struct io_wq *wq)
-{
- if (refcount_dec_and_test(&wq->refs))
- io_wq_destroy(wq);
-}
-
void io_wq_put_and_exit(struct io_wq *wq)
{
+ WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
io_wq_exit_workers(wq);
- io_wq_put(wq);
+ if (refcount_dec_and_test(&wq->refs))
+ io_wq_destroy(wq);
}
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 0e6d310999e8..af2df0680ee2 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -122,7 +122,7 @@ struct io_wq_data {
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-void io_wq_put(struct io_wq *wq);
+void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f46acbbeed57..fa8794c61af7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -100,6 +100,8 @@
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
IORING_REGISTER_LAST + IORING_OP_LAST)
+#define IORING_MAX_REG_BUFFERS (1U << 14)
+
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
IOSQE_BUFFER_SELECT)
@@ -781,6 +783,11 @@ struct io_task_work {
task_work_func_t func;
};
+enum {
+ IORING_RSRC_FILE = 0,
+ IORING_RSRC_BUFFER = 1,
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -4035,7 +4042,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#if defined(CONFIG_EPOLL)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
- if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4150,7 +4157,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
@@ -5017,10 +5024,10 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
* Can't handle multishot for double wait for now, turn it
* into one-shot mode.
*/
- if (!(req->poll.events & EPOLLONESHOT))
- req->poll.events |= EPOLLONESHOT;
+ if (!(poll_one->events & EPOLLONESHOT))
+ poll_one->events |= EPOLLONESHOT;
/* double add on the same waitqueue head, ignore */
- if (poll->head == head)
+ if (poll_one->head == head)
return;
poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
if (!poll) {
@@ -5827,8 +5834,6 @@ done:
static int io_rsrc_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
- if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
- return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->rw_flags)
@@ -6354,19 +6359,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
* We don't expect the list to be empty, that will only happen if we
* race with the completion of the linked work.
*/
- if (prev && req_ref_inc_not_zero(prev))
+ if (prev) {
io_remove_next_linked(prev);
- else
- prev = NULL;
+ if (!req_ref_inc_not_zero(prev))
+ prev = NULL;
+ }
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) {
io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
io_put_req_deferred(prev, 1);
+ io_put_req_deferred(req, 1);
} else {
io_req_complete_post(req, -ETIME, 0);
}
- io_put_req_deferred(req, 1);
return HRTIMER_NORESTART;
}
@@ -8227,6 +8233,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
{
int i, ret;
+ imu->acct_pages = 0;
for (i = 0; i < nr_pages; i++) {
if (!PageCompound(pages[i])) {
imu->acct_pages++;
@@ -8390,7 +8397,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
if (ctx->user_bufs)
return -EBUSY;
- if (!nr_args || nr_args > UIO_MAXIOV)
+ if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
return -EINVAL;
ret = io_rsrc_node_switch_start(ctx);
if (ret)
@@ -9034,14 +9041,19 @@ static void io_uring_del_task_file(unsigned long index)
static void io_uring_clean_tctx(struct io_uring_task *tctx)
{
+ struct io_wq *wq = tctx->io_wq;
struct io_tctx_node *node;
unsigned long index;
xa_for_each(&tctx->xa, index, node)
io_uring_del_task_file(index);
- if (tctx->io_wq) {
- io_wq_put_and_exit(tctx->io_wq);
+ if (wq) {
+ /*
+ * Must be after io_uring_del_task_file() (removes nodes under
+ * uring_lock) to avoid race with io_uring_try_cancel_iowq().
+ */
tctx->io_wq = NULL;
+ io_wq_put_and_exit(wq);
}
}
@@ -9077,6 +9089,9 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
if (!current->io_uring)
return;
+ if (tctx->io_wq)
+ io_wq_exit_start(tctx->io_wq);
+
WARN_ON_ONCE(!sqd || sqd->thread != current);
atomic_inc(&tctx->in_idle);
@@ -9111,6 +9126,9 @@ void __io_uring_cancel(struct files_struct *files)
DEFINE_WAIT(wait);
s64 inflight;
+ if (tctx->io_wq)
+ io_wq_exit_start(tctx->io_wq);
+
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
do {
@@ -9658,7 +9676,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
- IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
+ IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+ IORING_FEAT_RSRC_TAGS;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -9898,7 +9917,7 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
}
static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
- unsigned size)
+ unsigned size, unsigned type)
{
struct io_uring_rsrc_update2 up;
@@ -9906,13 +9925,13 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
- if (!up.nr)
+ if (!up.nr || up.resv)
return -EINVAL;
- return __io_register_rsrc_update(ctx, up.type, &up, up.nr);
+ return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int size)
+ unsigned int size, unsigned int type)
{
struct io_uring_rsrc_register rr;
@@ -9923,10 +9942,10 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
memset(&rr, 0, sizeof(rr));
if (copy_from_user(&rr, arg, size))
return -EFAULT;
- if (!rr.nr)
+ if (!rr.nr || rr.resv || rr.resv2)
return -EINVAL;
- switch (rr.type) {
+ switch (type) {
case IORING_RSRC_FILE:
return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
rr.nr, u64_to_user_ptr(rr.tags));
@@ -9948,8 +9967,10 @@ static bool io_register_op_must_quiesce(int op)
case IORING_REGISTER_PROBE:
case IORING_REGISTER_PERSONALITY:
case IORING_UNREGISTER_PERSONALITY:
- case IORING_REGISTER_RSRC:
- case IORING_REGISTER_RSRC_UPDATE:
+ case IORING_REGISTER_FILES2:
+ case IORING_REGISTER_FILES_UPDATE2:
+ case IORING_REGISTER_BUFFERS2:
+ case IORING_REGISTER_BUFFERS_UPDATE:
return false;
default:
return true;
@@ -10075,11 +10096,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
case IORING_REGISTER_RESTRICTIONS:
ret = io_register_restrictions(ctx, arg, nr_args);
break;
- case IORING_REGISTER_RSRC:
- ret = io_register_rsrc(ctx, arg, nr_args);
+ case IORING_REGISTER_FILES2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_FILES_UPDATE2:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_BUFFERS2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
break;
- case IORING_REGISTER_RSRC_UPDATE:
- ret = io_register_rsrc_update(ctx, arg, nr_args);
+ case IORING_REGISTER_BUFFERS_UPDATE:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_BUFFER);
break;
default:
ret = -EINVAL;
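The register opcodes above split the short-lived IORING_REGISTER_RSRC interface into per-type calls and start rejecting non-zero reserved fields. A sketch of registering a fixed buffer through IORING_REGISTER_BUFFERS2 follows; the struct layout and the opcode value 15 are copied locally from the uapi header added by this series and should be treated as assumptions:

/*
 * Sketch: register one fixed buffer via IORING_REGISTER_BUFFERS2.
 * The reserved fields of the registration struct must be zero.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <linux/types.h>
#include <linux/io_uring.h>

/* Local copies of the rsrc-register ABI (assumed layout/opcode). */
struct rsrc_register {
	__u32 nr;
	__u32 resv;		/* must be 0 */
	__u64 resv2;		/* must be 0 */
	__u64 data;		/* user pointer to struct iovec array */
	__u64 tags;		/* user pointer to __u64 tags, or 0 */
};
#define REG_BUFFERS2	15	/* IORING_REGISTER_BUFFERS2 */

int main(void)
{
	struct io_uring_params p;
	struct rsrc_register rr;
	struct iovec iov;
	int ring_fd;

	memset(&p, 0, sizeof(p));
	ring_fd = syscall(__NR_io_uring_setup, 4, &p);
	if (ring_fd < 0) { perror("io_uring_setup"); return 1; }

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);

	memset(&rr, 0, sizeof(rr));		/* reserved fields stay 0 */
	rr.nr = 1;
	rr.data = (unsigned long long)(unsigned long)&iov;

	if (syscall(__NR_io_uring_register, ring_fd, REG_BUFFERS2,
		    &rr, sizeof(rr)) < 0)
		perror("IORING_REGISTER_BUFFERS2");
	else
		printf("registered 1 fixed buffer\n");
	return 0;
}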
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f2cd2034a87b..9023717c5188 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -394,7 +394,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
struct inode *inode = rac->mapping->host;
loff_t pos = readahead_pos(rac);
- loff_t length = readahead_length(rac);
+ size_t length = readahead_length(rac);
struct iomap_readpage_ctx ctx = {
.rac = rac,
};
@@ -402,7 +402,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
trace_iomap_readahead(inode, readahead_count(rac));
while (length > 0) {
- loff_t ret = iomap_apply(inode, pos, length, 0, ops,
+ ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
&ctx, iomap_readahead_actor);
if (ret <= 0) {
WARN_ON_ONCE(ret == 0);
diff --git a/fs/namespace.c b/fs/namespace.c
index f63337828e1c..c3f1a78ba369 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3855,8 +3855,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
+ /* Don't yet support filesystems mountable in user namespaces. */
+ if (m->mnt_sb->s_user_ns != &init_user_ns)
+ return -EINVAL;
+
/* We're not controlling the superblock. */
- if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Mount has already been visible in the filesystem hierarchy. */
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
index 578112713703..b4db21022cb4 100644
--- a/fs/netfs/Kconfig
+++ b/fs/netfs/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NETFS_SUPPORT
- tristate "Support for network filesystem high-level I/O"
+ tristate
help
This option enables support for network filesystems, including
helpers for high-level buffered I/O, abstracting out read
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 193841d03de0..0b6cd3b8734c 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -1011,12 +1011,42 @@ out:
}
EXPORT_SYMBOL(netfs_readpage);
-static void netfs_clear_thp(struct page *page)
+/**
+ * netfs_skip_page_read - prep a page for writing without reading first
+ * @page: page being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full page write
+ * - write that lies in a page that is completely beyond EOF
+ * - write that covers the page from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the page and return true. Otherwise, return false.
+ */
+static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
{
- unsigned int i;
+ struct inode *inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ size_t offset = offset_in_thp(page, pos);
+
+ /* Full page write */
+ if (offset == 0 && len >= thp_size(page))
+ return true;
+
+ /* pos beyond last page in the file */
+ if (pos - offset >= i_size)
+ goto zero_out;
+
+ /* Write that covers from the start of the page to EOF or beyond */
+ if (offset == 0 && (pos + len) >= i_size)
+ goto zero_out;
- for (i = 0; i < thp_nr_pages(page); i++)
- clear_highpage(page + i);
+ return false;
+zero_out:
+ zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+ return true;
}
/**
@@ -1024,7 +1054,7 @@ static void netfs_clear_thp(struct page *page)
* @file: The file to read from
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
- * @len: The length of the write in this page
+ * @len: The length of the write (may extend beyond the end of the page chosen)
* @flags: AOP_* flags
* @_page: Where to put the resultant page
* @_fsdata: Place for the netfs to store a cookie
@@ -1061,14 +1091,12 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = file_inode(file);
unsigned int debug_index = 0;
pgoff_t index = pos >> PAGE_SHIFT;
- int pos_in_page = pos & ~PAGE_MASK;
- loff_t size;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
- page = grab_cache_page_write_begin(mapping, index, 0);
+ page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
@@ -1090,13 +1118,8 @@ retry:
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
- size = i_size_read(inode);
if (!ops->is_cache_enabled(inode) &&
- ((pos_in_page == 0 && len == thp_size(page)) ||
- (pos >= size) ||
- (pos_in_page == 0 && (pos + len) >= size))) {
- netfs_clear_thp(page);
- SetPageUptodate(page);
+ netfs_skip_page_read(page, pos, len)) {
netfs_stat(&netfs_n_rh_write_zskip);
goto have_page_no_wait;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index cfeaadf56bf0..330f65727c45 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
if (cl_init->hostname == NULL) {
WARN_ON(1);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
/* see if the client already exists */
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index d158a500c25c..d2103852475f 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
if (unlikely(!p))
goto out_err;
fl->fh_array[i]->size = be32_to_cpup(p++);
- if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
printk(KERN_ERR "NFS: Too big fh %d received %d\n",
i, fl->fh_array[i]->size);
goto out_err;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 93e60e921f92..bc0c698f3350 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -362,7 +362,7 @@ static const struct kernel_param_ops param_ops_nfs_timeout = {
.set = param_set_nfs_timeout,
.get = param_get_nfs_timeout,
};
-#define param_check_nfs_timeout(name, p) __param_check(name, p, int);
+#define param_check_nfs_timeout(name, p) __param_check(name, p, int)
module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644);
MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 065cb04222a1..543d916f79ab 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -205,6 +205,7 @@ struct nfs4_exception {
struct inode *inode;
nfs4_stateid *stateid;
long timeout;
+ unsigned char task_is_privileged : 1;
unsigned char delay : 1,
recovering : 1,
retry : 1;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 889a9f4c0310..42719384e25f 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
*/
nfs_mark_client_ready(clp, -EPERM);
}
- nfs_put_client(clp);
clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
+ nfs_put_client(clp);
return old;
error:
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 57b3821d975a..a1e5c6b85ded 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
case SEEK_HOLE:
case SEEK_DATA:
ret = nfs42_proc_llseek(filep, offset, whence);
- if (ret != -ENOTSUPP)
+ if (ret != -EOPNOTSUPP)
return ret;
fallthrough;
default:
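With the -EOPNOTSUPP check above, a SEEK_HOLE/SEEK_DATA request against a server without NFSv4.2 SEEK support falls through to generic_file_llseek() instead of erroring out. The user-visible interface is plain lseek(), roughly as in this sketch (path is a placeholder):

/*
 * Sketch: probe data and hole offsets with lseek(). On a server without
 * NFSv4.2 SEEK support the client now falls back to the generic
 * implementation instead of returning an error to the application.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "file-on-nfs";
	int fd = open(path, O_RDONLY);
	off_t data, hole;

	if (fd < 0) { perror("open"); return 1; }

	data = lseek(fd, 0, SEEK_DATA);	/* first data offset at/after 0 */
	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole offset at/after 0 */
	if (data < 0 || hole < 0)
		perror("lseek");	/* e.g. ENXIO when offset is past EOF */
	else
		printf("data at %lld, hole at %lld\n",
		       (long long)data, (long long)hole);

	close(fd);
	return 0;
}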
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 87d04f2c9385..e653654c10bc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -589,6 +589,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
goto out_retry;
}
if (exception->recovering) {
+ if (exception->task_is_privileged)
+ return -EDEADLOCK;
ret = nfs4_wait_clnt_recover(clp);
if (test_bit(NFS_MIG_FAILED, &server->mig_status))
return -EIO;
@@ -614,6 +616,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
goto out_retry;
}
if (exception->recovering) {
+ if (exception->task_is_privileged)
+ return -EDEADLOCK;
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
@@ -1706,7 +1710,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!signal_pending(current)) {
+ if (!fatal_signal_pending(current)) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -3487,7 +3491,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -3878,6 +3882,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
+ server->caps |= NFS_CAP_SECURITY_LABEL;
+#endif
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3898,10 +3906,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
- if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL))
- server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL;
-#endif
memcpy(server->attr_bitmask_nl, res.attr_bitmask,
sizeof(server->attr_bitmask));
server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -5968,6 +5972,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
do {
err = __nfs4_proc_set_acl(inode, buf, buflen);
trace_nfs4_set_acl(inode, err);
+ if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
+ /*
+ * no need to retry since the kernel
+ * isn't involved in encoding the ACEs.
+ */
+ err = -EINVAL;
+ break;
+ }
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
} while (exception.retry);
@@ -6409,6 +6421,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
struct nfs4_exception exception = {
.inode = data->inode,
.stateid = &data->stateid,
+ .task_is_privileged = data->args.seq_args.sa_privileged,
};
if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6532,7 +6545,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
- nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
nfs4_state_protect(server->nfs_client,
NFS_SP4_MACH_CRED_CLEANUP,
@@ -6563,6 +6575,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
}
}
+ if (!data->inode)
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+ 1);
+ else
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+ 0);
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -9640,15 +9658,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
&task_setup_data.rpc_client, &msg);
dprintk("--> %s\n", __func__);
+ lrp->inode = nfs_igrab_and_active(lrp->args.inode);
if (!sync) {
- lrp->inode = nfs_igrab_and_active(lrp->args.inode);
if (!lrp->inode) {
nfs4_layoutreturn_release(lrp);
return -EAGAIN;
}
task_setup_data.flags |= RPC_TASK_ASYNC;
}
- nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
+ if (!lrp->inode)
+ nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+ 1);
+ else
+ nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+ 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index eb1ef3462e84..ccef43e02b48 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -430,10 +430,6 @@ TRACE_DEFINE_ENUM(O_CLOEXEC);
{ O_NOATIME, "O_NOATIME" }, \
{ O_CLOEXEC, "O_CLOEXEC" })
-TRACE_DEFINE_ENUM(FMODE_READ);
-TRACE_DEFINE_ENUM(FMODE_WRITE);
-TRACE_DEFINE_ENUM(FMODE_EXEC);
-
#define show_fmode_flags(mode) \
__print_flags(mode, "|", \
{ ((__force unsigned long)FMODE_READ), "READ" }, \
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6c20b28d9d7c..cf9cc62ec48e 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *prev = NULL;
unsigned int size;
- if (mirror->pg_count != 0) {
- prev = nfs_list_entry(mirror->pg_list.prev);
- } else {
+ if (list_empty(&mirror->pg_list)) {
if (desc->pg_ops->pg_init)
desc->pg_ops->pg_init(desc, req);
if (desc->pg_error < 0)
return 0;
mirror->pg_base = req->wb_pgbase;
- }
+ mirror->pg_count = 0;
+ mirror->pg_recoalesce = 0;
+ } else
+ prev = nfs_list_entry(mirror->pg_list.prev);
if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
@@ -1127,18 +1128,13 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
if (!list_empty(&mirror->pg_list)) {
int error = desc->pg_ops->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
- else
+ if (list_empty(&mirror->pg_list))
mirror->pg_bytes_written += mirror->pg_count;
}
- if (list_empty(&mirror->pg_list)) {
- mirror->pg_count = 0;
- mirror->pg_base = 0;
- }
}
static void
@@ -1227,10 +1223,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
do {
list_splice_init(&mirror->pg_list, &head);
- mirror->pg_bytes_written -= mirror->pg_count;
- mirror->pg_count = 0;
- mirror->pg_base = 0;
- mirror->pg_recoalesce = 0;
while (!list_empty(&head)) {
struct nfs_page *req;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 03e0b34c4a64..2c01ee805306 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino)
{
struct pnfs_layout_hdr *lo = NULL;
struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
LIST_HEAD(tmp_list);
const struct cred *cred;
nfs4_stateid stateid;
@@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino)
}
valid_layout = pnfs_layout_is_valid(lo);
pnfs_clear_layoutcommit(ino, &tmp_list);
- pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+ pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
- if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
- struct pnfs_layout_range range = {
- .iomode = IOMODE_ANY,
- .offset = 0,
- .length = NFS4_MAX_UINT64,
- };
+ if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
- }
/* Don't send a LAYOUTRETURN if list was initially empty */
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
@@ -2678,7 +2677,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
- u64 rd_size = req->wb_bytes;
+ u64 rd_size;
pnfs_generic_pg_check_layout(pgio);
pnfs_generic_pg_check_range(pgio, req);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 19a212f9725d..fe58525cfed4 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1379,7 +1379,7 @@ static const struct kernel_param_ops param_ops_portnr = {
.set = param_set_portnr,
.get = param_get_uint,
};
-#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int)
module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644);
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 303d71430bdd..9c6c0e2e5880 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -1053,6 +1053,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
nilfs_sysfs_delete_superblock_group(nilfs);
nilfs_sysfs_delete_segctor_group(nilfs);
kobject_del(&nilfs->ns_dev_kobj);
+ kobject_put(&nilfs->ns_dev_kobj);
kfree(nilfs->ns_dev_subgroups);
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 71fefb30e015..64864fb40b40 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
* events generated by the listener process itself, without disclosing
* the pids of other processes.
*/
- if (!capable(CAP_SYS_ADMIN) &&
+ if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
task_tgid(current) != event->pid)
metadata.pid = 0;
- if (path && path->mnt && path->dentry) {
+ /*
+ * For now, fid mode is required for an unprivileged listener and
+ * fid mode does not report fd in events. Keep this check anyway
+ * for safety in case fid mode requirement is relaxed in the future
+ * to allow unprivileged listener to get events with no fd and no fid.
+ */
+ if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
+ path && path->mnt && path->dentry) {
fd = create_fd(group, path, &f);
if (fd < 0)
return fd;
@@ -464,7 +471,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
info_type, fanotify_info_name(info),
info->name_len, buf, count);
if (ret < 0)
- return ret;
+ goto out_close_fd;
buf += ret;
count -= ret;
@@ -512,7 +519,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
fanotify_event_object_fh(event),
info_type, dot, dot_len, buf, count);
if (ret < 0)
- return ret;
+ goto out_close_fd;
buf += ret;
count -= ret;
@@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
int f_flags, fd;
unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
unsigned int class = flags & FANOTIFY_CLASS_BITS;
+ unsigned int internal_flags = 0;
pr_debug("%s: flags=%x event_f_flags=%x\n",
__func__, flags, event_f_flags);
@@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
*/
if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
return -EPERM;
+
+ /*
+ * Setting the internal flag FANOTIFY_UNPRIV on the group
+ * prevents setting mount/filesystem marks on this group and
+ * prevents reporting pid and open fd in events.
+ */
+ internal_flags |= FANOTIFY_UNPRIV;
}
#ifdef CONFIG_AUDITSYSCALL
@@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
goto out_destroy_group;
}
- group->fanotify_data.flags = flags;
+ group->fanotify_data.flags = flags | internal_flags;
group->memcg = get_mem_cgroup_from_mm(current->mm);
group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
@@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
group = f.file->private_data;
/*
- * An unprivileged user is not allowed to watch a mount point nor
- * a filesystem.
+ * An unprivileged user is not allowed to setup mount nor filesystem
+ * marks. This also includes setting up such marks by a group that
+ * was initialized by an unprivileged user.
*/
ret = -EPERM;
- if (!capable(CAP_SYS_ADMIN) &&
+ if ((!capable(CAP_SYS_ADMIN) ||
+ FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) &&
mark_type != FAN_MARK_INODE)
goto fput_and_out;
@@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void)
max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS,
FANOTIFY_DEFAULT_MAX_USER_MARKS);
+ BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
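Taken together, the fanotify hunks above let an unprivileged process create a listener as long as it uses an fid reporting mode; such a group is tagged FANOTIFY_UNPRIV internally, may only place inode marks, and reports pid 0 for events generated by other processes. A rough sketch of such a listener (event mask and path are illustrative):

/*
 * Sketch: unprivileged fanotify listener using fid mode.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);

	if (fd < 0) { perror("fanotify_init"); return 1; }

	/* Mount and filesystem marks are rejected with EPERM for a group
	 * created without CAP_SYS_ADMIN, so stick to FAN_MARK_INODE. */
	if (fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN | FAN_CLOSE_WRITE,
			  AT_FDCWD, path)) {
		perror("fanotify_mark");
		return 1;
	}

	/* read(fd, ...) returns fanotify_event_metadata records followed by
	 * fid info records; no open fds are handed out in fid mode. */
	printf("watching %s\n", path);
	return 0;
}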
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index a712b2aaa9ac..57f0d5d9f934 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
struct fsnotify_group *group = f->private_data;
seq_printf(m, "fanotify flags:%x event-flags:%x\n",
- group->fanotify_data.flags,
+ group->fanotify_data.flags & FANOTIFY_INIT_FLAGS,
group->fanotify_data.f_flags);
show_fdinfo(m, f, fanotify_fdinfo);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f17c3d33fb18..775657943057 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1856,6 +1856,45 @@ out:
}
/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ * is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+ u64 start, u64 len)
+{
+ int ret;
+ u64 start_block, end_block, nr_blocks;
+ u64 p_block, offset;
+ u32 cluster, p_cluster, nr_clusters;
+ struct super_block *sb = inode->i_sb;
+ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+ if (start + len < end)
+ end = start + len;
+
+ start_block = ocfs2_blocks_for_bytes(sb, start);
+ end_block = ocfs2_blocks_for_bytes(sb, end);
+ nr_blocks = end_block - start_block;
+ if (!nr_blocks)
+ return 0;
+
+ cluster = ocfs2_bytes_to_clusters(sb, start);
+ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+ &nr_clusters, NULL);
+ if (ret)
+ return ret;
+ if (!p_cluster)
+ return 0;
+
+ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
+/*
* Parts of this function taken from xfs_change_file_space()
*/
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
{
int ret;
s64 llen;
- loff_t size;
+ loff_t size, orig_isize;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *di_bh = NULL;
handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
+ orig_isize = i_size_read(inode);
switch (sr->l_whence) {
case 0: /*SEEK_SET*/
break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
- sr->l_start += i_size_read(inode);
+ sr->l_start += orig_isize;
break;
default:
ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
default:
ret = -EINVAL;
}
+
+ /* zeroout eof blocks in the cluster. */
+ if (!ret && change_size && orig_isize < size) {
+ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+ size - orig_isize);
+ if (!ret)
+ i_size_write(inode, size);
+ }
up_write(&OCFS2_I(inode)->ip_alloc_sem);
if (ret) {
mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- if (change_size && i_size_read(inode) < size)
- i_size_write(inode, size);
-
inode->i_ctime = inode->i_mtime = current_time(inode);
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3851bfcdba56..9cbd915025ad 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2674,6 +2674,13 @@ out:
}
#ifdef CONFIG_SECURITY
+static int proc_pid_attr_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+ return 0;
+}
+
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
@@ -2703,6 +2710,10 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
void *page;
int rv;
+ /* A task may only write when it was the opener. */
+ if (file->private_data != current->mm)
+ return -EPERM;
+
rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (!task) {
@@ -2750,9 +2761,11 @@ out:
}
static const struct file_operations proc_pid_attr_operations = {
+ .open = proc_pid_attr_open,
.read = proc_pid_attr_read,
.write = proc_pid_attr_write,
.llseek = generic_file_llseek,
+ .release = mem_release,
};
#define LSM_DIR_OPS(LSM) \
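The proc_pid_attr_open()/proc_pid_attr_write() changes above record the opener's mm and reject writes from any other task, so /proc/<pid>/attr files must be opened and written by the same process. A minimal sketch of the allowed pattern (the attribute value is illustrative and depends on the active LSM):

/*
 * Sketch: write an LSM attribute for the current task from the task that
 * opened the file, as required by the change above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *ctx = "unconfined";		/* example value only */
	int fd = open("/proc/self/attr/current", O_WRONLY);

	if (fd < 0) { perror("open"); return 1; }

	/* Passing this fd to another process and writing from there now
	 * fails with EPERM, because the opener's mm is remembered. */
	if (write(fd, ctx, strlen(ctx)) < 0)
		perror("write");

	close(fd);
	return 0;
}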
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 4f1373463766..22d904bde6ab 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -288,14 +288,12 @@ static inline void remove_dquot_hash(struct dquot *dquot)
static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
struct kqid qid)
{
- struct hlist_node *node;
struct dquot *dquot;
- hlist_for_each (node, dquot_hash+hashent) {
- dquot = hlist_entry(node, struct dquot, dq_hash);
+ hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
return dquot;
- }
+
return NULL;
}
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 040a1142915f..167b5889db4b 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -114,29 +114,24 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
break;
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
+ case SIL_PERF_EVENT:
/*
- * Fall through to the SIL_FAULT case. Both SIL_FAULT_BNDERR
- * and SIL_FAULT_PKUERR are only generated by faults that
- * deliver them synchronously to userspace. In case someone
- * injects one of these signals and signalfd catches it treat
- * it as SIL_FAULT.
+ * Fall through to the SIL_FAULT case. SIL_FAULT_BNDERR,
+ * SIL_FAULT_PKUERR, and SIL_PERF_EVENT are only
+ * generated by faults that deliver them synchronously to
+ * userspace. In case someone injects one of these signals
+ * and signalfd catches it treat it as SIL_FAULT.
*/
case SIL_FAULT:
new.ssi_addr = (long) kinfo->si_addr;
-#ifdef __ARCH_SI_TRAPNO
- new.ssi_trapno = kinfo->si_trapno;
-#endif
break;
- case SIL_FAULT_MCEERR:
+ case SIL_FAULT_TRAPNO:
new.ssi_addr = (long) kinfo->si_addr;
-#ifdef __ARCH_SI_TRAPNO
new.ssi_trapno = kinfo->si_trapno;
-#endif
- new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
break;
- case SIL_PERF_EVENT:
+ case SIL_FAULT_MCEERR:
new.ssi_addr = (long) kinfo->si_addr;
- new.ssi_perf = kinfo->si_perf;
+ new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
break;
case SIL_CHLD:
new.ssi_pid = kinfo->si_pid;
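The signalfd_copyinfo() rework above groups SIL_FAULT_BNDERR, SIL_FAULT_PKUERR and SIL_PERF_EVENT with plain SIL_FAULT (address only), fills ssi_trapno only for SIL_FAULT_TRAPNO, and ssi_addr_lsb only for SIL_FAULT_MCEERR. As the comment notes, such fault-style signals normally reach signalfd only when injected; a basic reader showing the fields involved (sketch only):

/*
 * Sketch: read fault-style signals through signalfd and print the
 * address field copied by the code above.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/signalfd.h>
#include <unistd.h>

int main(void)
{
	struct signalfd_siginfo ssi;
	sigset_t mask;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGSEGV);
	sigaddset(&mask, SIGBUS);
	sigprocmask(SIG_BLOCK, &mask, NULL);	/* required before signalfd */

	fd = signalfd(-1, &mask, 0);
	if (fd < 0) { perror("signalfd"); return 1; }

	if (read(fd, &ssi, sizeof(ssi)) == sizeof(ssi))
		printf("sig %u code %d addr %#llx\n",
		       ssi.ssi_signo, ssi.ssi_code,
		       (unsigned long long)ssi.ssi_addr);

	close(fd);
	return 0;
}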
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 7b1128398976..89d492916dea 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -211,11 +211,11 @@ failure:
* If the skip factor is limited in this way then the file will use multiple
* slots.
*/
-static inline int calculate_skip(int blocks)
+static inline int calculate_skip(u64 blocks)
{
- int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+ u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
* SQUASHFS_META_INDEXES);
- return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
+ return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
}
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e32a1833d523..bbfea8022a3b 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -325,10 +325,22 @@ out:
error2 = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, 0);
if (error2)
return error2;
- ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
- xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved <=
- pag->pagf_freeblks + pag->pagf_flcount);
+
+ /*
+ * If there isn't enough space in the AG to satisfy the
+ * reservation, let the caller know that there wasn't enough
+ * space. Callers are responsible for deciding what to do
+ * next, since (in theory) we can stumble along with
+ * insufficient reservation if data blocks are being freed to
+ * replenish the AG's free space.
+ */
+ if (!error &&
+ xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+ xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved >
+ pag->pagf_freeblks + pag->pagf_flcount)
+ error = -ENOSPC;
}
+
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7e3b9b01431e..a3e0e6f672d6 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -605,7 +605,6 @@ xfs_bmap_btree_to_extents(
ASSERT(cur);
ASSERT(whichfork != XFS_COW_FORK);
- ASSERT(!xfs_need_iread_extents(ifp));
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
@@ -5350,7 +5349,6 @@ __xfs_bunmapi(
xfs_fsblock_t sum;
xfs_filblks_t len = *rlen; /* length to unmap in file */
xfs_fileoff_t max_len;
- xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
xfs_fileoff_t end;
struct xfs_iext_cursor icur;
bool done = false;
@@ -5442,16 +5440,6 @@ __xfs_bunmapi(
del = got;
wasdel = isnullstartblock(del.br_startblock);
- /*
- * Make sure we don't touch multiple AGF headers out of order
- * in a single transaction, as that could cause AB-BA deadlocks.
- */
- if (!wasdel && !isrt) {
- agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
- if (prev_agno != NULLAGNUMBER && prev_agno > agno)
- break;
- prev_agno = agno;
- }
if (got.br_startoff < start) {
del.br_startoff = start;
del.br_blockcount -= start - got.br_startoff;
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index a83bdd0c47a8..bde2b4c64dbe 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -770,6 +770,8 @@ struct xfs_scrub_metadata {
/*
* ioctl commands that are used by Linux filesystems
*/
+#define XFS_IOC_GETXFLAGS FS_IOC_GETFLAGS
+#define XFS_IOC_SETXFLAGS FS_IOC_SETFLAGS
#define XFS_IOC_GETVERSION FS_IOC_GETVERSION
/*
@@ -780,6 +782,8 @@ struct xfs_scrub_metadata {
#define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64)
#define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64)
#define XFS_IOC_DIOINFO _IOR ('X', 30, struct dioattr)
+#define XFS_IOC_FSGETXATTR FS_IOC_FSGETXATTR
+#define XFS_IOC_FSSETXATTR FS_IOC_FSSETXATTR
#define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64)
#define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64)
#define XFS_IOC_GETBMAP _IOWR('X', 38, struct getbmap)
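XFS_IOC_FSGETXATTR/XFS_IOC_FSSETXATTR above are now plain aliases for the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR ioctls, which is also the interface the extent-size-hint validation rules below apply to. A sketch of reading and setting the hint from userspace (the 1 MiB value and path are illustrative; the kernel enforces the rules listed in xfs_inode_validate_extsize()):

/*
 * Sketch: query and set the extent size hint via the fsxattr interface.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	struct fsxattr fsx;
	int fd = open(path, O_RDONLY);

	if (fd < 0) { perror("open"); return 1; }

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx)) { perror("FSGETXATTR"); return 1; }
	printf("extsize=%u xflags=%#x\n", fsx.fsx_extsize, fsx.fsx_xflags);

	fsx.fsx_xflags |= FS_XFLAG_EXTSIZE;	/* hint applies to this file */
	fsx.fsx_extsize = 1024 * 1024;		/* must satisfy the rules below */
	if (ioctl(fd, FS_IOC_FSSETXATTR, &fsx))
		perror("FSSETXATTR");

	close(fd);
	return 0;
}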
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 5c9a7440d9e4..f3254a4f4cb4 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -559,8 +559,17 @@ xfs_dinode_calc_crc(
/*
* Validate di_extsize hint.
*
- * The rules are documented at xfs_ioctl_setattr_check_extsize().
- * These functions must be kept in sync with each other.
+ * 1. Extent size hint is only valid for directories and regular files.
+ * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
+ * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. Hint cannot be larger than MAXEXTLEN.
+ * 5. Can be changed on directories at any time.
+ * 6. Hint value of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ * For realtime files, this is the rt extent size.
+ * 8. For non-realtime files, the extent size hint must be limited
+ * to half the AG size to avoid alignment extending the extent beyond the
+ * limits of the AG.
*/
xfs_failaddr_t
xfs_inode_validate_extsize(
@@ -580,6 +589,28 @@ xfs_inode_validate_extsize(
inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
extsize_bytes = XFS_FSB_TO_B(mp, extsize);
+ /*
+ * This comment describes a historic gap in this verifier function.
+ *
+ * On older kernels, the extent size hint verifier doesn't check that
+ * the extent size hint is an integer multiple of the realtime extent
+ * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
+ * The verifier has always enforced the alignment rule for regular
+ * files with the REALTIME flag set.
+ *
+ * If a directory with a misaligned extent size hint is allowed to
+ * propagate that hint into a new regular realtime file, the result
+ * is that the inode cluster buffer verifier will trigger a corruption
+ * shutdown the next time it is run.
+ *
+ * Unfortunately, there could be filesystems with these misconfigured
+ * directories in the wild, so we cannot add a check to this verifier
+ * at this time because that would result in a new source of directory
+ * corruption errors when reading an existing filesystem. Instead, we
+ * permit the misconfiguration to pass through the verifiers so that
+ * callers of this function can correct and mitigate externally.
+ */
+
if (rt_flag)
blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
else
@@ -616,8 +647,15 @@ xfs_inode_validate_extsize(
/*
* Validate di_cowextsize hint.
*
- * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
- * These functions must be kept in sync with each other.
+ * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
+ * The inode does not have to have any shared blocks, but it must be a v3.
+ * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
+ * for a directory, the hint is propagated to new files.
+ * 3. Can be changed on files & directories at any time.
+ * 4. Hint value of 0 turns off hints, clears inode flags.
+ * 5. Extent size must be a multiple of the appropriate block size.
+ * 6. The extent size hint must be limited to half the AG size to avoid
+ * alignment extending the extent beyond the limits of the AG.
*/
xfs_failaddr_t
xfs_inode_validate_cowextsize(
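
Note: the rule the new comments keep returning to is plain modular arithmetic: an inherited extent size hint is only usable on a realtime file if it is a whole multiple of the realtime extent size. The standalone check below uses made-up sample values rather than real superblock fields; extsize_hint_is_aligned() and rextsize_fsb are illustrative names, not kernel helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stands in for "hint % (rt extent size in fs blocks) == 0". */
static bool extsize_hint_is_aligned(uint32_t extsize_fsb, uint32_t rextsize_fsb)
{
        if (extsize_fsb == 0)
                return true;            /* 0 means "no hint", always fine */
        return (extsize_fsb % rextsize_fsb) == 0;
}

int main(void)
{
        uint32_t rextsize_fsb = 16;     /* hypothetical rt extent size, in fs blocks */

        /* 64 is a multiple of 16, so it is allowed; 24 is not, which is the
         * misconfiguration that xfs_trans_log_inode() now clears and that
         * the setattr path now rejects. */
        printf("hint 64 aligned: %d\n", extsize_hint_is_aligned(64, rextsize_fsb));
        printf("hint 24 aligned: %d\n", extsize_hint_is_aligned(24, rextsize_fsb));
        return 0;
}
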
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 78324e043e25..8d595a5c4abd 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -143,6 +143,23 @@ xfs_trans_log_inode(
}
/*
+ * Inode verifiers on older kernels don't check that the extent size
+ * hint is an integer multiple of the rt extent size on a directory
+ * with both rtinherit and extszinherit flags set. If we're logging a
+ * directory that is misconfigured in this way, clear the hint.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
+ (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+ xfs_info_once(ip->i_mount,
+ "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
+ ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+ XFS_DIFLAG_EXTSZINHERIT);
+ ip->i_extsize = 0;
+ flags |= XFS_ILOG_CORE;
+ }
+
+ /*
* Record the specific change for fdatasync optimisation. This allows
* fdatasync to skip log forces for inodes that are only timestamp
* dirty.
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index aa874607618a..be38c960da85 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -74,7 +74,9 @@ __xchk_process_error(
return true;
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(
+ sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
+ sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a5e9d7d34023..0936f3a96fe6 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -71,18 +71,24 @@ xfs_zero_extent(
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
- struct xfs_bmalloca *ap) /* bmap alloc argument struct */
+ struct xfs_bmalloca *ap)
{
- int error; /* error return value */
- xfs_mount_t *mp; /* mount point structure */
- xfs_extlen_t prod = 0; /* product factor for allocators */
- xfs_extlen_t mod = 0; /* product factor for allocators */
- xfs_extlen_t ralen = 0; /* realtime allocation length */
- xfs_extlen_t align; /* minimum allocation alignment */
- xfs_rtblock_t rtb;
-
- mp = ap->ip->i_mount;
+ struct xfs_mount *mp = ap->ip->i_mount;
+ xfs_fileoff_t orig_offset = ap->offset;
+ xfs_rtblock_t rtb;
+ xfs_extlen_t prod = 0; /* product factor for allocators */
+ xfs_extlen_t mod = 0; /* product factor for allocators */
+ xfs_extlen_t ralen = 0; /* realtime allocation length */
+ xfs_extlen_t align; /* minimum allocation alignment */
+ xfs_extlen_t orig_length = ap->length;
+ xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
+ xfs_extlen_t raminlen;
+ bool rtlocked = false;
+ bool ignore_locality = false;
+ int error;
+
align = xfs_get_extsz_hint(ap->ip);
+retry:
prod = align / mp->m_sb.sb_rextsize;
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 1, ap->eof, 0,
@@ -93,6 +99,15 @@ xfs_bmap_rtalloc(
ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
/*
+ * If we shifted the file offset downward to satisfy an extent size
+ * hint, increase minlen by that amount so that the allocator won't
+ * give us an allocation that's too short to cover at least one of the
+ * blocks that the caller asked for.
+ */
+ if (ap->offset != orig_offset)
+ minlen += orig_offset - ap->offset;
+
+ /*
* If the offset & length are not perfectly aligned
* then kill prod, it will just get us in trouble.
*/
@@ -116,10 +131,13 @@ xfs_bmap_rtalloc(
/*
* Lock out modifications to both the RT bitmap and summary inodes
*/
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
- xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
- xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
- xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ if (!rtlocked) {
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
+ xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
+ xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ rtlocked = true;
+ }
/*
* If it's an allocation to an empty file at offset 0,
@@ -141,33 +159,59 @@ xfs_bmap_rtalloc(
/*
* Realtime allocation, done through xfs_rtallocate_extent.
*/
- do_div(ap->blkno, mp->m_sb.sb_rextsize);
+ if (ignore_locality)
+ ap->blkno = 0;
+ else
+ do_div(ap->blkno, mp->m_sb.sb_rextsize);
rtb = ap->blkno;
ap->length = ralen;
- error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
- &ralen, ap->wasdel, prod, &rtb);
+ raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
+ error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
+ &ralen, ap->wasdel, prod, &rtb);
if (error)
return error;
- ap->blkno = rtb;
- if (ap->blkno != NULLFSBLOCK) {
- ap->blkno *= mp->m_sb.sb_rextsize;
- ralen *= mp->m_sb.sb_rextsize;
- ap->length = ralen;
- ap->ip->i_nblocks += ralen;
+ if (rtb != NULLRTBLOCK) {
+ ap->blkno = rtb * mp->m_sb.sb_rextsize;
+ ap->length = ralen * mp->m_sb.sb_rextsize;
+ ap->ip->i_nblocks += ap->length;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
if (ap->wasdel)
- ap->ip->i_delayed_blks -= ralen;
+ ap->ip->i_delayed_blks -= ap->length;
/*
* Adjust the disk quota also. This was reserved
* earlier.
*/
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
- XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
- } else {
- ap->length = 0;
+ XFS_TRANS_DQ_RTBCOUNT, ap->length);
+ return 0;
}
+
+ if (align > mp->m_sb.sb_rextsize) {
+ /*
+ * We previously enlarged the request length to try to satisfy
+ * an extent size hint. The allocator didn't return anything,
+ * so reset the parameters to the original values and try again
+ * without alignment criteria.
+ */
+ ap->offset = orig_offset;
+ ap->length = orig_length;
+ minlen = align = mp->m_sb.sb_rextsize;
+ goto retry;
+ }
+
+ if (!ignore_locality && ap->blkno != 0) {
+ /*
+ * If we can't allocate near a specific rt extent, try again
+ * without locality criteria.
+ */
+ ignore_locality = true;
+ goto retry;
+ }
+
+ ap->blkno = NULLFSBLOCK;
+ ap->length = 0;
return 0;
}
#endif /* CONFIG_XFS_RT */
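
Note: the reworked xfs_bmap_rtalloc() above is essentially a two-stage fallback loop: first drop the extent size hint alignment if the aligned request cannot be satisfied, then drop the locality target before giving up. The compressed sketch below mirrors only that control flow; try_alloc() and the boolean flags are illustrative and deliberately rigged so every fallback step runs.

#include <stdbool.h>
#include <stdio.h>

/* Pretend allocator: fails whenever alignment or locality is requested,
 * so both fallback steps below are exercised. */
static bool try_alloc(bool aligned, bool want_locality)
{
        return !aligned && !want_locality;
}

/* Mirrors the retry structure of the patched xfs_bmap_rtalloc():
 * 1) aligned + local, 2) unaligned + local, 3) unaligned + anywhere. */
static bool rt_alloc_with_fallback(void)
{
        bool aligned = true;
        bool want_locality = true;

retry:
        if (try_alloc(aligned, want_locality))
                return true;

        if (aligned) {
                aligned = false;        /* give up on the extent size hint */
                goto retry;
        }
        if (want_locality) {
                want_locality = false;  /* give up on allocating nearby */
                goto retry;
        }
        return false;                   /* genuinely out of space */
}

int main(void)
{
        printf("allocated: %d\n", rt_alloc_with_fallback());
        return 0;
}
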
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 0369eb22c1bb..e4c2da4566f1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -690,6 +690,7 @@ xfs_inode_inherit_flags(
const struct xfs_inode *pip)
{
unsigned int di_flags = 0;
+ xfs_failaddr_t failaddr;
umode_t mode = VFS_I(ip)->i_mode;
if (S_ISDIR(mode)) {
@@ -729,6 +730,24 @@ xfs_inode_inherit_flags(
di_flags |= XFS_DIFLAG_FILESTREAM;
ip->i_diflags |= di_flags;
+
+ /*
+ * Inode verifiers on older kernels only check that the extent size
+ * hint is an integer multiple of the rt extent size on realtime files.
+ * They do not check the hint alignment on a directory with both
+ * rtinherit and extszinherit flags set. If the misaligned hint is
+ * propagated from a directory into a new realtime file, new file
+ * allocations will fail due to math errors in the rt allocator and/or
+ * trip the verifiers. Validate the hint settings in the new file so
+ * that we don't let broken hints propagate.
+ */
+ failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
+ VFS_I(ip)->i_mode, ip->i_diflags);
+ if (failaddr) {
+ ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+ XFS_DIFLAG_EXTSZINHERIT);
+ ip->i_extsize = 0;
+ }
}
/* Propagate di_flags2 from a parent inode to a child inode. */
@@ -737,12 +756,22 @@ xfs_inode_inherit_flags2(
struct xfs_inode *ip,
const struct xfs_inode *pip)
{
+ xfs_failaddr_t failaddr;
+
if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
ip->i_cowextsize = pip->i_cowextsize;
}
if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
ip->i_diflags2 |= XFS_DIFLAG2_DAX;
+
+ /* Don't let invalid cowextsize hints propagate. */
+ failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
+ VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
+ if (failaddr) {
+ ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_cowextsize = 0;
+ }
}
/*
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 3925bfcb2365..1fe4c1fc0aea 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1267,20 +1267,8 @@ out_error:
}
/*
- * extent size hint validation is somewhat cumbersome. Rules are:
- *
- * 1. extent size hint is only valid for directories and regular files
- * 2. FS_XFLAG_EXTSIZE is only valid for regular files
- * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
- * 4. can only be changed on regular files if no extents are allocated
- * 5. can be changed on directories at any time
- * 6. extsize hint of 0 turns off hints, clears inode flags.
- * 7. Extent size must be a multiple of the appropriate block size.
- * 8. for non-realtime files, the extent size hint must be limited
- * to half the AG size to avoid alignment extending the extent beyond the
- * limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_extsize.
+ * Validate a proposed extent size hint. For regular files, the hint can only
+ * be changed if no extents are allocated.
*/
static int
xfs_ioctl_setattr_check_extsize(
@@ -1288,86 +1276,65 @@ xfs_ioctl_setattr_check_extsize(
struct fileattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
- xfs_extlen_t size;
- xfs_fsblock_t extsize_fsb;
+ xfs_failaddr_t failaddr;
+ uint16_t new_diflags;
if (!fa->fsx_valid)
return 0;
if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
- ((ip->i_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+ XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
return -EINVAL;
- if (fa->fsx_extsize == 0)
- return 0;
-
- extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
- if (extsize_fsb > MAXEXTLEN)
+ if (fa->fsx_extsize & mp->m_blockmask)
return -EINVAL;
- if (XFS_IS_REALTIME_INODE(ip) ||
- (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
- size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
- } else {
- size = mp->m_sb.sb_blocksize;
- if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+ new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+
+ /*
+ * Inode verifiers on older kernels don't check that the extent size
+ * hint is an integer multiple of the rt extent size on a directory
+ * with both rtinherit and extszinherit flags set. Don't let sysadmins
+ * misconfigure directories.
+ */
+ if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
+ (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
+ unsigned int rtextsize_bytes;
+
+ rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
+ if (fa->fsx_extsize % rtextsize_bytes)
return -EINVAL;
}
- if (fa->fsx_extsize % size)
- return -EINVAL;
-
- return 0;
+ failaddr = xfs_inode_validate_extsize(ip->i_mount,
+ XFS_B_TO_FSB(mp, fa->fsx_extsize),
+ VFS_I(ip)->i_mode, new_diflags);
+ return failaddr != NULL ? -EINVAL : 0;
}
-/*
- * CoW extent size hint validation rules are:
- *
- * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
- * The inode does not have to have any shared blocks, but it must be a v3.
- * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
- * for a directory, the hint is propagated to new files.
- * 3. Can be changed on files & directories at any time.
- * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
- * 5. Extent size must be a multiple of the appropriate block size.
- * 6. The extent size hint must be limited to half the AG size to avoid
- * alignment extending the extent beyond the limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_cowextsize.
- */
static int
xfs_ioctl_setattr_check_cowextsize(
struct xfs_inode *ip,
struct fileattr *fa)
{
struct xfs_mount *mp = ip->i_mount;
- xfs_extlen_t size;
- xfs_fsblock_t cowextsize_fsb;
+ xfs_failaddr_t failaddr;
+ uint64_t new_diflags2;
+ uint16_t new_diflags;
if (!fa->fsx_valid)
return 0;
- if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
- return 0;
-
- if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
+ if (fa->fsx_cowextsize & mp->m_blockmask)
return -EINVAL;
- if (fa->fsx_cowextsize == 0)
- return 0;
+ new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+ new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
- cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
- if (cowextsize_fsb > MAXEXTLEN)
- return -EINVAL;
-
- size = mp->m_sb.sb_blocksize;
- if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
- return -EINVAL;
-
- if (fa->fsx_cowextsize % size)
- return -EINVAL;
-
- return 0;
+ failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
+ XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
+ VFS_I(ip)->i_mode, new_diflags, new_diflags2);
+ return failaddr != NULL ? -EINVAL : 0;
}
static int
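
Note: from userspace, the hints validated above are set through FS_IOC_FSSETXATTR. With the stricter check in xfs_ioctl_setattr_check_extsize(), a hint that is not a multiple of the realtime extent size is rejected with EINVAL on a directory that carries both the rt and extent-size inheritance bits, instead of being stored and propagated. A minimal setter, with error handling kept short:

#include <fcntl.h>
#include <linux/fs.h>       /* struct fsxattr, FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Set an extent size hint (in bytes) on an open file or directory. */
static int set_extsize_hint(int fd, unsigned int extsize_bytes)
{
        struct fsxattr fsx;

        if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0)
                return -1;

        fsx.fsx_extsize = extsize_bytes;
        fsx.fsx_xflags |= FS_XFLAG_EXTSZINHERIT;   /* hint inherited by new children */

        return ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
}

int main(int argc, char **argv)
{
        int fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <dir> <extsize-bytes>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (set_extsize_hint(fd, (unsigned int)strtoul(argv[2], NULL, 0)) < 0)
                perror("FS_IOC_FSSETXATTR");
        close(fd);
        return 0;
}
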
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 3c392b1512ac..7ec1a9207517 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -73,6 +73,8 @@ do { \
xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
#define xfs_notice_once(dev, fmt, ...) \
xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__)
+#define xfs_info_once(dev, fmt, ...) \
+ xfs_printk_once(xfs_info, dev, fmt, ##__VA_ARGS__)
void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
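
Note: xfs_info_once() follows the same shape as the existing xfs_warn_once()/xfs_notice_once() wrappers: print the message the first time the call site is hit and stay silent afterwards, so the "Correcting misaligned extent size hint" message cannot flood the log. The userspace approximation below illustrates the print-once pattern only; it is not the kernel's implementation, which goes through xfs_printk_once() and the printk machinery.

#include <stdbool.h>
#include <stdio.h>

/* Print-once: the static flag is unique per expansion site, so each
 * call site logs at most one message no matter how often it runs. */
#define pr_info_once(...)                       \
        do {                                    \
                static bool printed__;          \
                if (!printed__) {               \
                        printed__ = true;       \
                        printf(__VA_ARGS__);    \
                }                               \
        } while (0)

int main(void)
{
        for (int i = 0; i < 3; i++)
                pr_info_once("logged exactly once despite %d iterations\n", 3);
        return 0;
}
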
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 888b6cfeed91..bc45af52c93b 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
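
Note: the long atomic-instrumented.h diff below is mechanical. This header is generated by a script in the kernel tree (scripts/atomic/gen-atomic-instrumented.sh), and every wrapper has the same shape: tell the sanitizers about the access, then call the arch_ primitive. The hunks strip the per-op #if defined(arch_...) guards and the self-#defines; the wrapper bodies themselves are unchanged. The userspace analogue below only illustrates that wrapper shape: instrument_access() stands in for the KASAN/KCSAN hooks and my_atomic_add_return() for a generated wrapper, neither is a kernel API.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static unsigned long instrumented_accesses;

/* Stand-in for the sanitizer hook; here it just counts accesses. */
static inline void instrument_access(const void *p, size_t size)
{
        (void)p;
        (void)size;
        instrumented_accesses++;
}

/* Same two-step pattern as each generated wrapper: instrument, then
 * perform the real atomic operation. */
static inline int my_atomic_add_return(int i, atomic_int *v)
{
        instrument_access(v, sizeof(*v));
        return atomic_fetch_add(v, i) + i;
}

int main(void)
{
        atomic_int v = 40;

        printf("value=%d, instrumented accesses=%lu\n",
               my_atomic_add_return(2, &v), instrumented_accesses);
        return 0;
}
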
@@ -27,17 +27,13 @@ atomic_read(const atomic_t *v)
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}
-#define atomic_read atomic_read
-#if defined(arch_atomic_read_acquire)
static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}
-#define atomic_read_acquire atomic_read_acquire
-#endif
static __always_inline void
atomic_set(atomic_t *v, int i)
@@ -45,17 +41,13 @@ atomic_set(atomic_t *v, int i)
instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
-#define atomic_set atomic_set
-#if defined(arch_atomic_set_release)
static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}
-#define atomic_set_release atomic_set_release
-#endif
static __always_inline void
atomic_add(int i, atomic_t *v)
@@ -63,87 +55,62 @@ atomic_add(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
-#define atomic_add atomic_add
-#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
-#define atomic_add_return atomic_add_return
-#endif
-#if defined(arch_atomic_add_return_acquire)
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-#if defined(arch_atomic_add_return_release)
static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-#if defined(arch_atomic_add_return_relaxed)
static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#endif
-#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-#if defined(arch_atomic_fetch_add_acquire)
static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-#if defined(arch_atomic_fetch_add_release)
static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-#if defined(arch_atomic_fetch_add_relaxed)
static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#endif
static __always_inline void
atomic_sub(int i, atomic_t *v)
@@ -151,267 +118,188 @@ atomic_sub(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
-#define atomic_sub atomic_sub
-#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
-#define atomic_sub_return atomic_sub_return
-#endif
-#if defined(arch_atomic_sub_return_acquire)
static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-#if defined(arch_atomic_sub_return_release)
static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-#if defined(arch_atomic_sub_return_relaxed)
static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#endif
-#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-#if defined(arch_atomic_fetch_sub_acquire)
static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-#if defined(arch_atomic_fetch_sub_release)
static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-#if defined(arch_atomic_fetch_sub_relaxed)
static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#endif
-#if defined(arch_atomic_inc)
static __always_inline void
atomic_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_inc(v);
}
-#define atomic_inc atomic_inc
-#endif
-#if defined(arch_atomic_inc_return)
static __always_inline int
atomic_inc_return(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
-#define atomic_inc_return atomic_inc_return
-#endif
-#if defined(arch_atomic_inc_return_acquire)
static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-#if defined(arch_atomic_inc_return_release)
static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-#if defined(arch_atomic_inc_return_relaxed)
static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-#if defined(arch_atomic_fetch_inc)
static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-#if defined(arch_atomic_fetch_inc_acquire)
static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-#if defined(arch_atomic_fetch_inc_release)
static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-#if defined(arch_atomic_fetch_inc_relaxed)
static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-#if defined(arch_atomic_dec)
static __always_inline void
atomic_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_dec(v);
}
-#define atomic_dec atomic_dec
-#endif
-#if defined(arch_atomic_dec_return)
static __always_inline int
atomic_dec_return(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
-#define atomic_dec_return atomic_dec_return
-#endif
-#if defined(arch_atomic_dec_return_acquire)
static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-#if defined(arch_atomic_dec_return_release)
static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-#if defined(arch_atomic_dec_return_relaxed)
static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-#if defined(arch_atomic_fetch_dec)
static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-#if defined(arch_atomic_fetch_dec_acquire)
static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-#if defined(arch_atomic_fetch_dec_release)
static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-#if defined(arch_atomic_fetch_dec_relaxed)
static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
static __always_inline void
atomic_and(int i, atomic_t *v)
@@ -419,97 +307,69 @@ atomic_and(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
-#define atomic_and atomic_and
-#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-#if defined(arch_atomic_fetch_and_acquire)
static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-#if defined(arch_atomic_fetch_and_release)
static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-#if defined(arch_atomic_fetch_and_relaxed)
static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#endif
-#if defined(arch_atomic_andnot)
static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
-#define atomic_andnot atomic_andnot
-#endif
-#if defined(arch_atomic_fetch_andnot)
static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-#if defined(arch_atomic_fetch_andnot_acquire)
static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-#if defined(arch_atomic_fetch_andnot_release)
static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-#if defined(arch_atomic_fetch_andnot_relaxed)
static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
static __always_inline void
atomic_or(int i, atomic_t *v)
@@ -517,47 +377,34 @@ atomic_or(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
-#define atomic_or atomic_or
-#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-#if defined(arch_atomic_fetch_or_acquire)
static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-#if defined(arch_atomic_fetch_or_release)
static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-#if defined(arch_atomic_fetch_or_relaxed)
static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#endif
static __always_inline void
atomic_xor(int i, atomic_t *v)
@@ -565,129 +412,91 @@ atomic_xor(int i, atomic_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
-#define atomic_xor atomic_xor
-#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-#if defined(arch_atomic_fetch_xor_acquire)
static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-#if defined(arch_atomic_fetch_xor_release)
static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-#if defined(arch_atomic_fetch_xor_relaxed)
static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#endif
-#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
-#define atomic_xchg atomic_xchg
-#endif
-#if defined(arch_atomic_xchg_acquire)
static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-#if defined(arch_atomic_xchg_release)
static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-#if defined(arch_atomic_xchg_relaxed)
static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#endif
-#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-#if defined(arch_atomic_cmpxchg_acquire)
static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-#if defined(arch_atomic_cmpxchg_release)
static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-#if defined(arch_atomic_cmpxchg_relaxed)
static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic_try_cmpxchg)
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
@@ -695,10 +504,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-#if defined(arch_atomic_try_cmpxchg_acquire)
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
@@ -706,10 +512,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-#if defined(arch_atomic_try_cmpxchg_release)
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
@@ -717,10 +520,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-#if defined(arch_atomic_try_cmpxchg_relaxed)
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
@@ -728,108 +528,76 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic_sub_and_test)
static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-#if defined(arch_atomic_dec_and_test)
static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-#if defined(arch_atomic_inc_and_test)
static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-#if defined(arch_atomic_add_negative)
static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
-#define atomic_add_negative atomic_add_negative
-#endif
-#if defined(arch_atomic_fetch_add_unless)
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-#if defined(arch_atomic_add_unless)
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
-#define atomic_add_unless atomic_add_unless
-#endif
-#if defined(arch_atomic_inc_not_zero)
static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-#if defined(arch_atomic_inc_unless_negative)
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-#if defined(arch_atomic_dec_unless_positive)
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-#if defined(arch_atomic_dec_if_positive)
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
static __always_inline s64
atomic64_read(const atomic64_t *v)
@@ -837,17 +605,13 @@ atomic64_read(const atomic64_t *v)
instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
-#define atomic64_read atomic64_read
-#if defined(arch_atomic64_read_acquire)
static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
@@ -855,17 +619,13 @@ atomic64_set(atomic64_t *v, s64 i)
instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
-#define atomic64_set atomic64_set
-#if defined(arch_atomic64_set_release)
static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}
-#define atomic64_set_release atomic64_set_release
-#endif
static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
@@ -873,87 +633,62 @@ atomic64_add(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
-#define atomic64_add atomic64_add
-#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
-#define atomic64_add_return atomic64_add_return
-#endif
-#if defined(arch_atomic64_add_return_acquire)
static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-#if defined(arch_atomic64_add_return_release)
static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-#if defined(arch_atomic64_add_return_relaxed)
static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#endif
-#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-#if defined(arch_atomic64_fetch_add_acquire)
static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-#if defined(arch_atomic64_fetch_add_release)
static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-#if defined(arch_atomic64_fetch_add_relaxed)
static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#endif
static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
@@ -961,267 +696,188 @@ atomic64_sub(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
-#define atomic64_sub atomic64_sub
-#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-#if defined(arch_atomic64_sub_return_acquire)
static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-#if defined(arch_atomic64_sub_return_release)
static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-#if defined(arch_atomic64_sub_return_relaxed)
static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#endif
-#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-#if defined(arch_atomic64_fetch_sub_acquire)
static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-#if defined(arch_atomic64_fetch_sub_release)
static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-#if defined(arch_atomic64_fetch_sub_relaxed)
static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#endif
-#if defined(arch_atomic64_inc)
static __always_inline void
atomic64_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
-#define atomic64_inc atomic64_inc
-#endif
-#if defined(arch_atomic64_inc_return)
static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-#if defined(arch_atomic64_inc_return_acquire)
static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-#if defined(arch_atomic64_inc_return_release)
static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-#if defined(arch_atomic64_inc_return_relaxed)
static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-#if defined(arch_atomic64_fetch_inc)
static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-#if defined(arch_atomic64_fetch_inc_acquire)
static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-#if defined(arch_atomic64_fetch_inc_release)
static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-#if defined(arch_atomic64_fetch_inc_relaxed)
static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-#if defined(arch_atomic64_dec)
static __always_inline void
atomic64_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
-#define atomic64_dec atomic64_dec
-#endif
-#if defined(arch_atomic64_dec_return)
static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-#if defined(arch_atomic64_dec_return_acquire)
static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-#if defined(arch_atomic64_dec_return_release)
static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-#if defined(arch_atomic64_dec_return_relaxed)
static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-#if defined(arch_atomic64_fetch_dec)
static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-#if defined(arch_atomic64_fetch_dec_acquire)
static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-#if defined(arch_atomic64_fetch_dec_release)
static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-#if defined(arch_atomic64_fetch_dec_relaxed)
static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
@@ -1229,97 +885,69 @@ atomic64_and(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
-#define atomic64_and atomic64_and
-#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-#if defined(arch_atomic64_fetch_and_acquire)
static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-#if defined(arch_atomic64_fetch_and_release)
static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-#if defined(arch_atomic64_fetch_and_relaxed)
static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#endif
-#if defined(arch_atomic64_andnot)
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
-#define atomic64_andnot atomic64_andnot
-#endif
-#if defined(arch_atomic64_fetch_andnot)
static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-#if defined(arch_atomic64_fetch_andnot_acquire)
static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-#if defined(arch_atomic64_fetch_andnot_release)
static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-#if defined(arch_atomic64_fetch_andnot_relaxed)
static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
@@ -1327,47 +955,34 @@ atomic64_or(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
-#define atomic64_or atomic64_or
-#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-#if defined(arch_atomic64_fetch_or_acquire)
static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-#if defined(arch_atomic64_fetch_or_release)
static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-#if defined(arch_atomic64_fetch_or_relaxed)
static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#endif
static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
@@ -1375,129 +990,91 @@ atomic64_xor(s64 i, atomic64_t *v)
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
-#define atomic64_xor atomic64_xor
-#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-#if defined(arch_atomic64_fetch_xor_acquire)
static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-#if defined(arch_atomic64_fetch_xor_release)
static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-#if defined(arch_atomic64_fetch_xor_relaxed)
static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#endif
-#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
-#define atomic64_xchg atomic64_xchg
-#endif
-#if defined(arch_atomic64_xchg_acquire)
static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-#if defined(arch_atomic64_xchg_release)
static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-#if defined(arch_atomic64_xchg_relaxed)
static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
-#endif
-#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-#if defined(arch_atomic64_cmpxchg_acquire)
static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-#if defined(arch_atomic64_cmpxchg_release)
static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-#if defined(arch_atomic64_cmpxchg_relaxed)
static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic64_try_cmpxchg)
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
@@ -1505,10 +1082,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-#if defined(arch_atomic64_try_cmpxchg_acquire)
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
@@ -1516,10 +1090,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-#if defined(arch_atomic64_try_cmpxchg_release)
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
@@ -1527,10 +1098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-#if defined(arch_atomic64_try_cmpxchg_relaxed)
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
@@ -1538,218 +1106,161 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-#if defined(arch_atomic64_sub_and_test)
static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-#if defined(arch_atomic64_dec_and_test)
static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-#if defined(arch_atomic64_inc_and_test)
static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-#if defined(arch_atomic64_add_negative)
static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-#if defined(arch_atomic64_fetch_add_unless)
static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-#if defined(arch_atomic64_add_unless)
static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-#if defined(arch_atomic64_inc_not_zero)
static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-#if defined(arch_atomic64_inc_unless_negative)
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-#if defined(arch_atomic64_dec_unless_positive)
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-#if defined(arch_atomic64_dec_if_positive)
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_xchg_acquire)
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_xchg_release)
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_xchg_relaxed)
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg_acquire)
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg_release)
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg_relaxed)
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg64_acquire)
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg64_release)
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if defined(arch_cmpxchg64_relaxed)
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
-#endif
-#if !defined(arch_try_cmpxchg_relaxed) || defined(arch_try_cmpxchg)
#define try_cmpxchg(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1758,9 +1269,7 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
-#if defined(arch_try_cmpxchg_acquire)
#define try_cmpxchg_acquire(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1769,9 +1278,7 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
-#if defined(arch_try_cmpxchg_release)
#define try_cmpxchg_release(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1780,9 +1287,7 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
-#if defined(arch_try_cmpxchg_relaxed)
#define try_cmpxchg_relaxed(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
@@ -1791,7 +1296,6 @@ atomic64_dec_if_positive(atomic64_t *v)
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#endif
#define cmpxchg_local(ptr, ...) \
({ \
@@ -1830,4 +1334,4 @@ atomic64_dec_if_positive(atomic64_t *v)
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 4bec382e44520f4d8267e42620054db26a659ea3
+// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5
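A quick illustration of how the instrumented xchg() wrapper above behaves at a call site (the variable names are hypothetical; this only shows that the pointer is evaluated once, the access is reported to the sanitizers via instrument_atomic_write(), and the real work is delegated to arch_xchg()):

    unsigned long state;                    /* hypothetical variable */
    unsigned long old = xchg(&state, 0UL);  /* __ai_ptr = &state;
                                             * instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));
                                             * arch_xchg(__ai_ptr, 0UL); */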
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 11f96f40f4a7..04b8be9f1a77 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic C implementation of atomic counter operations. Usable on
- * UP systems only. Do not include in machine independent code.
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,56 +12,39 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-/*
- * atomic_$op() - $op integer to atomic variable
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
- * smp_mb__{before,after}_atomic().
- */
-
-/*
- * atomic_$op_return() - $op interer to atomic variable and returns the result
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does imply a full memory barrier.
- */
-
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
@@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -110,87 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */
-#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
-#endif
-#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
-#endif
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
ATOMIC_OP(xor, ^)
-#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#ifndef atomic_read
-#define atomic_read(v) READ_ONCE((v)->counter)
-#endif
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
-#include <linux/irqflags.h>
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_GENERIC_ATOMIC_H */
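For reference, a sketch of what ATOMIC_OP(add, +) above generates in the CONFIG_SMP case: every generic atomic is built as a retry loop around arch_cmpxchg(), while the UP variant instead wraps a plain read-modify-write in local interrupt disabling. This is just the expansion of the macro shown in the hunk, not new code:

    static inline void generic_atomic_add(int i, atomic_t *v)
    {
            int c, old;

            c = v->counter;
            while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
                    c = old;        /* lost the race; retry with the value another CPU installed */
    }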
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..100d24b02e52 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
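With the defines above, the call chain on an architecture that selects the generic 64-bit atomics looks roughly like this ('v' is a hypothetical variable; the generic_atomic64_*() functions are the out-of-line library implementations declared in this header):

    atomic64_t v = ATOMIC64_INIT(0);

    atomic64_add(1, &v);    /* instrumented wrapper
                             *   -> arch_atomic64_add()
                             *   -> generic_atomic64_add() */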
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index f17f14f84d09..380cdc824e4b 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
/*
* Generic version of __cmpxchg64_local. Takes an u64 parameter.
*/
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
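A rough sketch (not the exact code) of the 4-byte path inside __generic_cmpxchg_local(): the compare and store are atomic only with respect to local interrupts, which is why these helpers are the "local" variants; the real function additionally switches on 'size' and routes unsupported sizes to wrong_size_cmpxchg(). The helper name below is made up for the illustration:

    static inline u32 cmpxchg_local_sketch(volatile u32 *ptr, u32 old, u32 new)
    {
            unsigned long flags;
            u32 prev;

            raw_local_irq_save(flags);      /* no cross-CPU atomicity implied */
            prev = *ptr;
            if (prev == old)
                    *ptr = new;
            raw_local_irq_restore(flags);
            return prev;
    }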
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 9a24510cd8c1..dca4419922a9 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-#ifndef xchg
-
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#endif /* CONFIG_64BIT */
default:
- __xchg_called_with_bad_pointer();
+ __generic_xchg_called_with_bad_pointer();
return x;
}
}
-#define xchg(ptr, x) ({ \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
-#endif /* xchg */
-
/*
* Atomic compare and exchange.
*/
#include <asm-generic/cmpxchg-local.h>
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
#endif
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
#endif
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
#endif /* __ASM_GENERIC_CMPXCHG_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 40a9c101565e..17325416e2de 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -960,6 +960,7 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
+ *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index fef3ef65967f..e6526b138174 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -106,6 +106,10 @@
* <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF 0
+
/* SOP Product Type (UFP) */
#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
@@ -163,10 +167,10 @@
#define UFP_VDO_VER1_2 2
/* Device Capability */
-#define DEV_USB2_CAPABLE BIT(0)
-#define DEV_USB2_BILLBOARD BIT(1)
-#define DEV_USB3_CAPABLE BIT(2)
-#define DEV_USB4_CAPABLE BIT(3)
+#define DEV_USB2_CAPABLE (1 << 0)
+#define DEV_USB2_BILLBOARD (1 << 1)
+#define DEV_USB3_CAPABLE (1 << 2)
+#define DEV_USB4_CAPABLE (1 << 3)
/* Connector Type */
#define UFP_RECEPTACLE 2
@@ -191,9 +195,9 @@
/* Alternate Modes */
#define UFP_ALTMODE_NOT_SUPP 0
-#define UFP_ALTMODE_TBT3 BIT(0)
-#define UFP_ALTMODE_RECFG BIT(1)
-#define UFP_ALTMODE_NO_RECFG BIT(2)
+#define UFP_ALTMODE_TBT3 (1 << 0)
+#define UFP_ALTMODE_RECFG (1 << 1)
+#define UFP_ALTMODE_NO_RECFG (1 << 2)
/* USB Highest Speed */
#define UFP_USB2_ONLY 0
@@ -217,9 +221,9 @@
* <4:0> :: Port number
*/
#define DFP_VDO_VER1_1 1
-#define HOST_USB2_CAPABLE BIT(0)
-#define HOST_USB3_CAPABLE BIT(1)
-#define HOST_USB4_CAPABLE BIT(2)
+#define HOST_USB2_CAPABLE (1 << 0)
+#define HOST_USB3_CAPABLE (1 << 1)
+#define HOST_USB4_CAPABLE (1 << 2)
#define DFP_RECEPTACLE 2
#define DFP_CAPTIVE 3
@@ -228,7 +232,25 @@
| ((pnum) & 0x1f))
/*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -244,7 +266,7 @@
* <4:3> :: Reserved, Shall be set to zero
* <2:0> :: USB highest speed
*
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -266,7 +288,9 @@
#define CABLE_VDO_VER1_0 0
#define CABLE_VDO_VER1_3 3
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
#define CABLE_CAPTIVE 3
@@ -303,12 +327,22 @@
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+
/* USB Highest Speed */
#define CABLE_USB2_ONLY 0
#define CABLE_USB32_GEN1 1
#define CABLE_USB32_4_GEN2 2
#define CABLE_USB4_GEN3 3
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
| ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
@@ -374,6 +408,35 @@
| (iso) << 2 | (gen))
/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
* VPD VDO
* ---------
* <31:28> :: HW version
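To make the Cable VDO layout documented above concrete, here is a hypothetical PD Rev2.0 cable VDO built with the new VDO_CABLE() helper (every field value is invented purely for the example): HW version 1, FW version 0, Type-C plug on the far end, <10 ns (~1 m) latency, passive termination, fixed superspeed lane directionality, 3 A Vbus capability, Vbus carried through the cable, no SOP'' controller, USB 3.1 Gen1 signalling:

    u32 cable_vdo = VDO_CABLE(1, 0, CABLE_CTYPE, 1, 0,
                              0, 0, 0, 0,
                              CABLE_CURR_3A, 1, 0,
                              CABLE_USBSS_U31_GEN1);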
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index f180240dc95f..11e555cfaecb 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -37,7 +37,6 @@ bool topology_scale_freq_invariant(void);
enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
- SCALE_FREQ_SOURCE_CPPC,
};
struct scale_freq_data {
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6e67aded28f8..1b44f40c7700 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -13,7 +13,7 @@
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
deleted file mode 100644
index 2a3f55d98be9..000000000000
--- a/include/linux/atomic-fallback.h
+++ /dev/null
@@ -1,2595 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef xchg_relaxed
-#define xchg_acquire xchg
-#define xchg_release xchg
-#define xchg_relaxed xchg
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) \
- __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) \
- __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) \
- __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-
-#endif /* xchg_relaxed */
-
-#ifndef cmpxchg_relaxed
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-#define cmpxchg_relaxed cmpxchg
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg_relaxed */
-
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-#define cmpxchg64_relaxed cmpxchg64
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg64_relaxed */
-
-#ifndef try_cmpxchg_relaxed
-#ifdef try_cmpxchg
-#define try_cmpxchg_acquire try_cmpxchg
-#define try_cmpxchg_release try_cmpxchg
-#define try_cmpxchg_relaxed try_cmpxchg
-#endif /* try_cmpxchg */
-
-#ifndef try_cmpxchg
-#define try_cmpxchg(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg */
-
-#ifndef try_cmpxchg_acquire
-#define try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_acquire */
-
-#ifndef try_cmpxchg_release
-#define try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_release((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_release */
-
-#ifndef try_cmpxchg_relaxed
-#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_relaxed */
-
-#else /* try_cmpxchg_relaxed */
-
-#ifndef try_cmpxchg_acquire
-#define try_cmpxchg_acquire(...) \
- __atomic_op_acquire(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef try_cmpxchg_release
-#define try_cmpxchg_release(...) \
- __atomic_op_release(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef try_cmpxchg
-#define try_cmpxchg(...) \
- __atomic_op_fence(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* try_cmpxchg_relaxed */
-
-#define arch_atomic_read atomic_read
-#define arch_atomic_read_acquire atomic_read_acquire
-
-#ifndef atomic_read_acquire
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-#define arch_atomic_set atomic_set
-#define arch_atomic_set_release atomic_set_release
-
-#ifndef atomic_set_release
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-#define arch_atomic_add atomic_add
-
-#define arch_atomic_add_return atomic_add_return
-#define arch_atomic_add_return_acquire atomic_add_return_acquire
-#define arch_atomic_add_return_release atomic_add_return_release
-#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
-
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-#define atomic_add_return_relaxed atomic_add_return
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#ifndef atomic_add_return_release
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#ifndef atomic_add_return
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#endif /* atomic_add_return_relaxed */
-
-#define arch_atomic_fetch_add atomic_fetch_add
-#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
-#define arch_atomic_fetch_add_release atomic_fetch_add_release
-#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#ifndef atomic_fetch_add_release
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#ifndef atomic_fetch_add
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#endif /* atomic_fetch_add_relaxed */
-
-#define arch_atomic_sub atomic_sub
-
-#define arch_atomic_sub_return atomic_sub_return
-#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
-#define arch_atomic_sub_return_release atomic_sub_return_release
-#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
-
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-#define atomic_sub_return_relaxed atomic_sub_return
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#ifndef atomic_sub_return_release
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#ifndef atomic_sub_return
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#endif /* atomic_sub_return_relaxed */
-
-#define arch_atomic_fetch_sub atomic_fetch_sub
-#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
-#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#ifndef atomic_fetch_sub_release
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#ifndef atomic_fetch_sub
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#endif /* atomic_fetch_sub_relaxed */
-
-#define arch_atomic_inc atomic_inc
-
-#ifndef atomic_inc
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#define arch_atomic_inc_return atomic_inc_return
-#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
-#define arch_atomic_inc_return_release atomic_inc_return_release
-#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
-
-#ifndef atomic_inc_return_relaxed
-#ifdef atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-#define atomic_inc_return_relaxed atomic_inc_return
-#endif /* atomic_inc_return */
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- return atomic_add_return(1, v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- return atomic_add_return_acquire(1, v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- return atomic_add_return_release(1, v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return_relaxed
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- return atomic_add_return_relaxed(1, v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#endif /* atomic_inc_return_relaxed */
-
-#define arch_atomic_fetch_inc atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
-#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc_relaxed
-#ifdef atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- return atomic_fetch_add(1, v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- return atomic_fetch_add_acquire(1, v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- return atomic_fetch_add_release(1, v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc_relaxed
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return atomic_fetch_add_relaxed(1, v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#endif /* atomic_fetch_inc_relaxed */
-
-#define arch_atomic_dec atomic_dec
-
-#ifndef atomic_dec
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#define arch_atomic_dec_return atomic_dec_return
-#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
-#define arch_atomic_dec_return_release atomic_dec_return_release
-#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
-
-#ifndef atomic_dec_return_relaxed
-#ifdef atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-#define atomic_dec_return_relaxed atomic_dec_return
-#endif /* atomic_dec_return */
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- return atomic_sub_return(1, v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- return atomic_sub_return_acquire(1, v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- return atomic_sub_return_release(1, v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return_relaxed
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- return atomic_sub_return_relaxed(1, v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#endif /* atomic_dec_return_relaxed */
-
-#define arch_atomic_fetch_dec atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
-#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec_relaxed
-#ifdef atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- return atomic_fetch_sub(1, v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- return atomic_fetch_sub_acquire(1, v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- return atomic_fetch_sub_release(1, v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec_relaxed
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return atomic_fetch_sub_relaxed(1, v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#endif /* atomic_fetch_dec_relaxed */
-
-#define arch_atomic_and atomic_and
-
-#define arch_atomic_fetch_and atomic_fetch_and
-#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
-#define arch_atomic_fetch_and_release atomic_fetch_and_release
-#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#ifndef atomic_fetch_and_release
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#ifndef atomic_fetch_and
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#endif /* atomic_fetch_and_relaxed */
-
-#define arch_atomic_andnot atomic_andnot
-
-#ifndef atomic_andnot
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#define arch_atomic_fetch_andnot atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
-#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-
-#ifndef atomic_fetch_andnot_relaxed
-#ifdef atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#endif /* atomic_fetch_andnot_relaxed */
-
-#define arch_atomic_or atomic_or
-
-#define arch_atomic_fetch_or atomic_fetch_or
-#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
-#define arch_atomic_fetch_or_release atomic_fetch_or_release
-#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#ifndef atomic_fetch_or_release
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#ifndef atomic_fetch_or
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#endif /* atomic_fetch_or_relaxed */
-
-#define arch_atomic_xor atomic_xor
-
-#define arch_atomic_fetch_xor atomic_fetch_xor
-#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
-#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#ifndef atomic_fetch_xor_release
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#ifndef atomic_fetch_xor
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#endif /* atomic_fetch_xor_relaxed */
-
-#define arch_atomic_xchg atomic_xchg
-#define arch_atomic_xchg_acquire atomic_xchg_acquire
-#define arch_atomic_xchg_release atomic_xchg_release
-#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
-
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-#define atomic_xchg_relaxed atomic_xchg
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#ifndef atomic_xchg_release
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#ifndef atomic_xchg
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#endif /* atomic_xchg_relaxed */
-
-#define arch_atomic_cmpxchg atomic_cmpxchg
-#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
-#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#ifndef atomic_cmpxchg_release
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#ifndef atomic_cmpxchg
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#endif /* atomic_cmpxchg_relaxed */
-
-#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-
-#ifndef atomic_try_cmpxchg_relaxed
-#ifdef atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg_relaxed
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic_try_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#endif /* atomic_try_cmpxchg_relaxed */
-
-#define arch_atomic_sub_and_test atomic_sub_and_test
-
-#ifndef atomic_sub_and_test
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- return atomic_sub_return(i, v) == 0;
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#define arch_atomic_dec_and_test atomic_dec_and_test
-
-#ifndef atomic_dec_and_test
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- return atomic_dec_return(v) == 0;
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#define arch_atomic_inc_and_test atomic_inc_and_test
-
-#ifndef atomic_inc_and_test
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v) == 0;
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#define arch_atomic_add_negative atomic_add_negative
-
-#ifndef atomic_add_negative
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
-
-#ifndef atomic_fetch_add_unless
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#define arch_atomic_add_unless atomic_add_unless
-
-#ifndef atomic_add_unless
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- return atomic_fetch_add_unless(v, a, u) != u;
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#define arch_atomic_inc_not_zero atomic_inc_not_zero
-
-#ifndef atomic_inc_not_zero
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- return atomic_add_unless(v, 1, 0);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
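
For illustration, a minimal userspace C11 sketch of the try_cmpxchg()-style loop that atomic_fetch_add_unless() and atomic_inc_not_zero() rely on above; it uses <stdatomic.h> in place of the kernel's atomic_t and is only an analogy, not kernel code:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* mirror of the fallback above: add @a to @v unless @v already equals @u */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        do {
                if (c == u)
                        break;
                /* like atomic_try_cmpxchg(), a failed CAS refreshes c */
        } while (!atomic_compare_exchange_weak(v, &c, c + a));

        return c;
}

static bool inc_not_zero(atomic_int *v)
{
        return fetch_add_unless(v, 1, 0) != 0;
}

int main(void)
{
        atomic_int live = 2, dead = 0;

        assert(inc_not_zero(&live) && atomic_load(&live) == 3);
        assert(!inc_not_zero(&dead) && atomic_load(&dead) == 0);
        return 0;
}
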
-
-#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
-
-#ifndef atomic_inc_unless_negative
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
-
-#ifndef atomic_dec_unless_positive
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#define arch_atomic_dec_if_positive atomic_dec_if_positive
-
-#ifndef atomic_dec_if_positive
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
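
The same style of sketch works for atomic_dec_if_positive(): treat the counter as a pool of tokens and take one only when the result stays non-negative. As above, this is a userspace C11 analogy, not the kernel implementation:

#include <assert.h>
#include <stdatomic.h>

static int dec_if_positive(atomic_int *v)
{
        int dec, c = atomic_load(v);

        do {
                dec = c - 1;
                if (dec < 0)
                        break;
        } while (!atomic_compare_exchange_weak(v, &c, dec));

        return dec;     /* >= 0 if a token was taken, negative otherwise */
}

int main(void)
{
        atomic_int tokens = 1;

        assert(dec_if_positive(&tokens) == 0);  /* took the last one */
        assert(dec_if_positive(&tokens) < 0);   /* nothing left */
        return 0;
}
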
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#define arch_atomic64_read atomic64_read
-#define arch_atomic64_read_acquire atomic64_read_acquire
-
-#ifndef atomic64_read_acquire
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-#define arch_atomic64_set atomic64_set
-#define arch_atomic64_set_release atomic64_set_release
-
-#ifndef atomic64_set_release
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-#define arch_atomic64_add atomic64_add
-
-#define arch_atomic64_add_return atomic64_add_return
-#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
-#define arch_atomic64_add_return_release atomic64_add_return_release
-#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
-
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-#define atomic64_add_return_relaxed atomic64_add_return
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#ifndef atomic64_add_return_release
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#ifndef atomic64_add_return
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#endif /* atomic64_add_return_relaxed */
-
-#define arch_atomic64_fetch_add atomic64_fetch_add
-#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
-#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#ifndef atomic64_fetch_add_release
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#ifndef atomic64_fetch_add
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#endif /* atomic64_fetch_add_relaxed */
-
-#define arch_atomic64_sub atomic64_sub
-
-#define arch_atomic64_sub_return atomic64_sub_return
-#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
-#define arch_atomic64_sub_return_release atomic64_sub_return_release
-#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#ifndef atomic64_sub_return_release
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#ifndef atomic64_sub_return
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#endif /* atomic64_sub_return_relaxed */
-
-#define arch_atomic64_fetch_sub atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
-#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#ifndef atomic64_fetch_sub_release
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#ifndef atomic64_fetch_sub
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#endif /* atomic64_fetch_sub_relaxed */
-
-#define arch_atomic64_inc atomic64_inc
-
-#ifndef atomic64_inc
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#define arch_atomic64_inc_return atomic64_inc_return
-#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
-#define arch_atomic64_inc_return_release atomic64_inc_return_release
-#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-
-#ifndef atomic64_inc_return_relaxed
-#ifdef atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- return atomic64_add_return(1, v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- return atomic64_add_return_acquire(1, v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- return atomic64_add_return_release(1, v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return_relaxed
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return atomic64_add_return_relaxed(1, v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#endif /* atomic64_inc_return_relaxed */
-
-#define arch_atomic64_fetch_inc atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
-#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc_relaxed
-#ifdef atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- return atomic64_fetch_add(1, v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return atomic64_fetch_add_acquire(1, v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- return atomic64_fetch_add_release(1, v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc_relaxed
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_add_relaxed(1, v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#endif /* atomic64_fetch_inc_relaxed */
-
-#define arch_atomic64_dec atomic64_dec
-
-#ifndef atomic64_dec
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#define arch_atomic64_dec_return atomic64_dec_return
-#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
-#define arch_atomic64_dec_return_release atomic64_dec_return_release
-#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-
-#ifndef atomic64_dec_return_relaxed
-#ifdef atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- return atomic64_sub_return(1, v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- return atomic64_sub_return_acquire(1, v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- return atomic64_sub_return_release(1, v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return_relaxed
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return atomic64_sub_return_relaxed(1, v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#endif /* atomic64_dec_return_relaxed */
-
-#define arch_atomic64_fetch_dec atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
-#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec_relaxed
-#ifdef atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- return atomic64_fetch_sub(1, v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return atomic64_fetch_sub_acquire(1, v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- return atomic64_fetch_sub_release(1, v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec_relaxed
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_sub_relaxed(1, v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#endif /* atomic64_fetch_dec_relaxed */
-
-#define arch_atomic64_and atomic64_and
-
-#define arch_atomic64_fetch_and atomic64_fetch_and
-#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
-#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#ifndef atomic64_fetch_and_release
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#ifndef atomic64_fetch_and
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#endif /* atomic64_fetch_and_relaxed */
-
-#define arch_atomic64_andnot atomic64_andnot
-
-#ifndef atomic64_andnot
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-
-#ifndef atomic64_fetch_andnot_relaxed
-#ifdef atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#endif /* atomic64_fetch_andnot_relaxed */
-
-#define arch_atomic64_or atomic64_or
-
-#define arch_atomic64_fetch_or atomic64_fetch_or
-#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
-#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#ifndef atomic64_fetch_or_release
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#ifndef atomic64_fetch_or
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#endif /* atomic64_fetch_or_relaxed */
-
-#define arch_atomic64_xor atomic64_xor
-
-#define arch_atomic64_fetch_xor atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
-#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#ifndef atomic64_fetch_xor_release
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#ifndef atomic64_fetch_xor
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#endif /* atomic64_fetch_xor_relaxed */
-
-#define arch_atomic64_xchg atomic64_xchg
-#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
-#define arch_atomic64_xchg_release atomic64_xchg_release
-#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
-
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-#define atomic64_xchg_relaxed atomic64_xchg
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#ifndef atomic64_xchg_release
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#ifndef atomic64_xchg
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#endif /* atomic64_xchg_relaxed */
-
-#define arch_atomic64_cmpxchg atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
-#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_cmpxchg_release
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#ifndef atomic64_cmpxchg
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#endif /* atomic64_cmpxchg_relaxed */
-
-#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-
-#ifndef atomic64_try_cmpxchg_relaxed
-#ifdef atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic64_try_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#endif /* atomic64_try_cmpxchg_relaxed */
-
-#define arch_atomic64_sub_and_test atomic64_sub_and_test
-
-#ifndef atomic64_sub_and_test
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return atomic64_sub_return(i, v) == 0;
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#define arch_atomic64_dec_and_test atomic64_dec_and_test
-
-#ifndef atomic64_dec_and_test
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- return atomic64_dec_return(v) == 0;
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#define arch_atomic64_inc_and_test atomic64_inc_and_test
-
-#ifndef atomic64_inc_and_test
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- return atomic64_inc_return(v) == 0;
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#define arch_atomic64_add_negative atomic64_add_negative
-
-#ifndef atomic64_add_negative
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
-
-#ifndef atomic64_fetch_add_unless
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#define arch_atomic64_add_unless atomic64_add_unless
-
-#ifndef atomic64_add_unless
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
-
-#ifndef atomic64_inc_not_zero
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
-
-#ifndef atomic64_inc_unless_negative
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
-
-#ifndef atomic64_dec_unless_positive
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
-
-#ifndef atomic64_dec_if_positive
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// d78e6c293c661c15188f0ec05bce45188c8d5892
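
Throughout the file removed above, each ordered variant is built from the _relaxed primitive plus explicit fences: _acquire is the relaxed op followed by an acquire fence, _release is a release fence followed by the relaxed op, and the fully ordered form brackets the relaxed op with full fences. A rough userspace C11 analogue of that composition rule (C11 fences are only an approximation of the kernel's __atomic_*_fence() hooks):

#include <stdatomic.h>
#include <stdio.h>

static int fetch_add_acquire(atomic_int *v, int i)
{
        int ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);
        return ret;
}

static int fetch_add_release(atomic_int *v, int i)
{
        atomic_thread_fence(memory_order_release);
        return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

static int fetch_add(atomic_int *v, int i)      /* fully ordered */
{
        int ret;

        atomic_thread_fence(memory_order_seq_cst);
        ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return ret;
}

int main(void)
{
        atomic_int v = 0;

        fetch_add_release(&v, 1);
        fetch_add_acquire(&v, 1);
        printf("%d\n", fetch_add(&v, 1) + 1);   /* prints 3 */
        return 0;
}
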
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 571a11008ab5..ed1d3ffd5b9d 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,12 +77,8 @@
__ret; \
})
-#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
-#else
-#include <linux/atomic-fallback.h>
-#endif
#include <asm-generic/atomic-long.h>
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 565deea6ffe8..8612f8fc86c1 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -830,6 +830,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
struct virtchnl_proto_hdrs {
u8 tunnel_level;
+ u8 pad[3];
/**
* specify where protocol header start from.
* 0 - from the outer layer
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 7f475d59a097..87d112650dfb 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -22,7 +22,7 @@
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
#else
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1255823b2bc0..f69c75bd6d27 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -676,11 +676,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
-static inline bool blk_account_rq(struct request *rq)
-{
- return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
-}
-
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 71b5d481c653..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -50,7 +50,7 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len);
@@ -104,6 +104,8 @@ struct ceph_auth_client {
struct mutex mutex;
};
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
struct ceph_auth_client *ceph_auth_init(const char *name,
const struct ceph_crypto_key *key,
const int *con_modes);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 559ee05f86b2..fb8f6d2cd104 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -232,7 +232,7 @@ struct css_set {
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -668,7 +668,7 @@ struct cgroup_subsys {
*/
bool threaded:1;
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -757,7 +757,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
* On boot, sock_cgroup_data records the cgroup that the sock was created
* in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
+ * net_cls starts being used, the area is overridden to carry prioidx and/or
* classid. The two modes are distinguished by whether the lowest bit is
* set. Clear bit indicates cgroup pointer while set bit prioidx and
* classid.
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4f2f79de083e..6bc9c76680b2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -32,7 +32,7 @@ struct kernel_clone_args;
#ifdef CONFIG_CGROUPS
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 98dd7b324c35..8855b1b702b2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -213,12 +213,11 @@ typedef struct compat_siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
sizeof(short) : __alignof__(compat_uptr_t))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -236,7 +235,10 @@ typedef struct compat_siginfo {
u32 _pkey;
} _addr_pkey;
/* used when si_code=TRAP_PERF */
- compat_ulong_t _perf;
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ } _perf;
};
} _sigfault;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index df5b405e6305..77047904cf70 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -115,18 +115,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
-#define annotate_reachable() ({ \
- asm volatile("%c0:\n\t" \
+#define __stringify_label(n) #n
+
+#define __annotate_reachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
-#define annotate_unreachable() ({ \
- asm volatile("%c0:\n\t" \
+#define annotate_reachable() __annotate_reachable(__COUNTER__)
+
+#define __annotate_unreachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
+#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
+
#define ASM_UNREACHABLE \
"999:\n\t" \
".pushsection .discard.unreachable\n\t" \
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index c043b8d2b17b..183ddd5fd072 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -199,6 +199,7 @@
* must end with any of these keywords:
* break;
* fallthrough;
+ * continue;
* goto <label>;
* return [expression];
*
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 153734816b49..d5b9c8d40c18 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -101,6 +101,7 @@ struct vc_data {
unsigned int vc_rows;
unsigned int vc_size_row; /* Bytes per row */
unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned int vc_cell_height; /* CRTC character cell height */
unsigned long vc_origin; /* [!] Start of real screen */
unsigned long vc_scr_end; /* [!] End of real screen */
unsigned long vc_visible_origin; /* [!] Top of visible window */
diff --git a/include/linux/const.h b/include/linux/const.h
index 81b8aae5a855..435ddd72d2c4 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -3,4 +3,12 @@
#include <vdso/const.h>
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
#endif /* _LINUX_CONST_H */
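
The trick here is that a constant-expression argument turns the cast into a null pointer constant, which gives the conditional the type int *, while a non-constant argument leaves it as void *; the sizeof comparison then tells the two apart without ever evaluating the argument. A small standalone check (GCC/Clang, relying on the same sizeof(void) == 1 extension the kernel assumes; the macro body is copied from the hunk above):

#include <assert.h>

#define __is_constexpr(x) \
        (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

int main(void)
{
        int runtime_val = 5;

        _Static_assert(__is_constexpr(3), "literal");
        _Static_assert(__is_constexpr(sizeof(long)), "sizeof is constant");
        assert(!__is_constexpr(runtime_val));
        return 0;
}
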
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 2915f56ad421..edb5c186b0b7 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -27,8 +27,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
diff --git a/include/linux/device.h b/include/linux/device.h
index 38a2071cf776..f1a00040fa53 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -570,7 +570,7 @@ struct device {
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @rm_work: Work structure used for removing the link.
* @supplier_preactivated: Supplier has been made active before consumer probe.
*/
struct device_link {
@@ -583,9 +583,7 @@ struct device_link {
u32 flags;
refcount_t rpm_active;
struct kref kref;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
+ struct work_struct rm_work;
bool supplier_preactivated; /* Owned by consumer probe. */
};
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a57ee75342cf..dce631e678dd 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -32,6 +32,11 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 1fe8e105b83b..dcb2f9022c1d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -34,7 +34,7 @@ struct elevator_mq_ops {
void (*depth_updated)(struct blk_mq_hw_ctx *);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
+ bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 8b2b1d68b954..136b8d97d8c0 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -3,6 +3,7 @@
#define __LINUX_ENTRYKVM_H
#include <linux/entry-common.h>
+#include <linux/tick.h>
/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
static inline void xfer_to_guest_mode_prepare(void)
{
lockdep_assert_irqs_disabled();
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
}
/**
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index bad41bcb25df..a16dbeced152 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
FANOTIFY_USER_INIT_FLAGS)
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
+
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
FAN_MARK_FILESYSTEM)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a8dccd23c249..ecfbcc0553a5 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
/* drivers/video/fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index ed4e67a7ff1c..59828516ebaf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -187,5 +187,6 @@ extern u32 fw_devlink_get_flags(void);
extern bool fw_devlink_is_strict(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
#endif
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 7e9660ea967d..6fc26f7bdf71 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -306,8 +306,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
}
#endif /* CONFIG_SYSFS */
-extern struct rw_semaphore bdev_lookup_sem;
-
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
#ifdef CONFIG_BLOCK
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 271021e20a3f..10e922cee4eb 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1167,8 +1167,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*/
static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
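
The two forms agree for every report->size >= 1; they diverge only at size == 0, where the old open-coded expression wraps around to a huge value while DIV_ROUND_UP() yields 0. A throwaway userspace check (DIV_ROUND_UP() redefined locally to match the kernel macro, report->size modelled as a 32-bit unsigned value):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        for (uint32_t size = 1; size <= 4096; size++)
                assert(DIV_ROUND_UP(size, 8) == ((size - 1) >> 3) + 1);

        /* size == 0: the old form underflows, the new form stays at 0 */
        printf("old=%u new=%u\n", ((0u - 1) >> 3) + 1, DIV_ROUND_UP(0u, 8));
        return 0;
}
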
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 232e1bd507a7..9b0487c88571 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int __host1x_client_register(struct host1x_client *client,
- struct lock_class_key *key);
-#define host1x_client_register(class) \
- ({ \
- static struct lock_class_key __key; \
- __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
})
int host1x_client_unregister(struct host1x_client *client);
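
To make the comment above concrete, a hypothetical driver-side fragment (struct my_device and its embedded client member are invented for illustration; only the host1x_client_* calls come from this header, and the snippet is not compilable on its own):

/* combined path: host1x_client_register() both initializes and registers */
err = host1x_client_register(&mydev->client);

/* split path: initialize once, then use the low-level register call so the
 * already-initialized client is not reinitialized */
host1x_client_init(&mydev->client);
err = __host1x_client_register(&mydev->client);
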
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9626fda5efce..2a8ebe6c222e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_page(struct page *page)
{
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b92f25ccef58..3c0117656745 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -149,6 +149,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
@@ -339,6 +340,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
+static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ return 0;
+}
+
static inline void putback_active_hugepage(struct page *page)
{
}
@@ -604,6 +610,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page);
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
@@ -733,17 +741,6 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
-}
-
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
@@ -980,11 +977,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index 2c300689a51a..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1623 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blk-mq.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-/*
- * Probably not wise to fiddle with these
- */
-#define SUPPORT_VLB_SYNC 1
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
- void *special;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = BIT(0),
- IDE_SFLAG_RECALIBRATE = BIT(1),
- IDE_SFLAG_SET_MULTMODE = BIT(2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = BIT(1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = BIT(2),
- IDE_VALID_LBAL = BIT(3),
- IDE_VALID_LBAM = BIT(4),
- IDE_VALID_LBAH = BIT(5),
- IDE_VALID_DEVICE = BIT(6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = BIT(0),
- IDE_TFLAG_WRITE = BIT(1),
- IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
- IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = BIT(4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = BIT(5),
- IDE_TFLAG_FS = BIT(6),
- IDE_TFLAG_MULTI_PIO = BIT(7),
- IDE_TFLAG_SET_XFER = BIT(8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = BIT(0),
- IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
- IDE_FTFLAG_OUT_DATA = BIT(2),
- IDE_FTFLAG_IN_DATA = BIT(3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = BIT(0),
- PC_FLAG_SUPPRESS_ERROR = BIT(1),
- PC_FLAG_WAIT_FOR_DSC = BIT(2),
- PC_FLAG_DMA_OK = BIT(3),
- PC_FLAG_DMA_IN_PROGRESS = BIT(4),
- PC_FLAG_DMA_ERROR = BIT(5),
- PC_FLAG_WRITING = BIT(6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
- * those are more or less driver-specific and some of them are subject
- * to change/removal later.
- */
- unsigned long timeout;
-};
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
- int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = BIT(0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = BIT(1),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = BIT(2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = BIT(6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = BIT(7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
- IDE_AFLAG_VERTOS_300_SSD = BIT(9),
- IDE_AFLAG_VERTOS_600_ESD = BIT(10),
- IDE_AFLAG_SANYO_3CD = BIT(11),
- IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
- IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
- IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = BIT(15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = BIT(16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = BIT(17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = BIT(18),
- /* 0 When the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = BIT(19),
- /* Device already opened */
- IDE_AFLAG_BUSY = BIT(20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = BIT(21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = BIT(22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = BIT(23),
-
- IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = BIT(0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = BIT(1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = BIT(2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = BIT(3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = BIT(4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = BIT(5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = BIT(6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = BIT(7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = BIT(8),
- IDE_DFLAG_NOPROBE = BIT(9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = BIT(10),
- IDE_DFLAG_FORCED_GEOM = BIT(12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = BIT(13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = BIT(14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = BIT(15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = BIT(16),
- /* powermanagement told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = BIT(17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = BIT(18),
- IDE_DFLAG_POST_RESET = BIT(19),
- IDE_DFLAG_UDMA33_WARNED = BIT(20),
- IDE_DFLAG_LBA48 = BIT(21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = BIT(22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = BIT(23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
- IDE_DFLAG_LBA = BIT(25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = BIT(26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = BIT(27),
- IDE_DFLAG_MEDIA_CHANGED = BIT(28),
- /* write protect */
- IDE_DFLAG_WP = BIT(29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
- IDE_DFLAG_NIEN_QUIRK = BIT(31),
-};
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- bool (*prep_rq)(struct ide_drive_s *, struct request *);
-
- struct blk_mq_tag_set tag_set;
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
- u8 dn; /* now wide spread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- bool sense_rq_active;
- struct request *sense_rq;
- struct request_sense sense_data;
-
- /* async sense insertion */
- struct work_struct rq_work;
- struct list_head rq_list;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on hba specifics
- * @pre_reset: chipset specific changes to default for device-hba resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only required to be
- * implemented for the SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = BIT(0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
- char name[6]; /* name of interface, eg. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning.. */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC BIT(0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_ro_field(_name, _field) \
-ide_devset_get(_name, _field); \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- int (*show)(struct seq_file *, void *);
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-int ide_capacity_proc_show(struct seq_file *m, void *v);
-int ide_geometry_proc_show(struct seq_file *m, void *v);
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = BIT(0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = BIT(1),
- /* packet commands handling */
- IDE_DBG_PC = BIT(2),
- /* request handling */
- IDE_DBG_RQ = BIT(3),
- /* driver probing/setup */
- IDE_DBG_PROBE = BIT(4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- * - ide_stopped : In this case, the core calls us back again unless
- * step have been set to ide_power_state_completed.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless step have been
- * set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
-
-/*
- * Subdrivers support.
- *
- * The gendriver.owner field should be set to the module owner of this driver.
- * The gendriver.name field should be set to the name of this driver
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-void ide_insert_request_head(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset (ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */
- REQ_IDETAPE_READ = BIT(2),
- REQ_IDETAPE_WRITE = BIT(3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three (ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(struct timer_list *t);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
- /* Uses ISA control ports not PCI ones. */
- IDE_HFLAG_ISA_PORTS = BIT(0),
- /* single port device */
- IDE_HFLAG_SINGLE = BIT(1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = BIT(3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
- /*
- * keep DMA setting when programming PIO mode, may be used only
- * for hosts which have separate PIO and DMA timings (ie. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = BIT(8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = BIT(9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = BIT(11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = BIT(13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = BIT(14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = BIT(15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = BIT(16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = BIT(17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = BIT(18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = BIT(20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = BIT(21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = BIT(22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = BIT(23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = BIT(24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = BIT(25),
- IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = BIT(27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = BIT(29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = BIT(30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
- u16 max_sectors; /* if < than the default one */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
-{
- return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
-{
- hwif->hwif_data = data;
-}
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = BIT(0),
- IDE_TIMING_ACT8B = BIT(1),
- IDE_TIMING_REC8B = BIT(2),
- IDE_TIMING_CYC8B = BIT(3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = BIT(4),
- IDE_TIMING_RECOVER = BIT(5),
- IDE_TIMING_CYCLE = BIT(6),
- IDE_TIMING_UDMA = BIT(7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
-
-
-#endif /* _IDE_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index 045ad1650ed1..d82b4b2e1d25 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -242,7 +242,8 @@ extern bool initcall_debug;
asm(".section \"" __sec "\", \"a\" \n" \
__stringify(__name) ": \n" \
".long " __stringify(__stub) " - . \n" \
- ".previous \n");
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
#else
#define ____define_initcall(fn, __unused, __name, __sec) \
static initcall_t __name __used \
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index 93e2ad67fc10..fa2cd8c63dcc 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -4,13 +4,16 @@
#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)
+#include <linux/stringify.h>
+
/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({ \
- asm volatile("%c0: nop\n\t" \
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
/*
* Because instrumentation_{begin,end}() can nest, objtool validation considers
@@ -43,12 +46,13 @@
* To avoid this, have _end() be a NOP instruction, this ensures it will be
* part of the condition block and does not escape.
*/
-#define instrumentation_end() ({ \
- asm volatile("%c0: nop\n\t" \
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t"); \
})
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else
# define instrumentation_begin() do { } while(0)
# define instrumentation_end() do { } while(0)
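
The reworked macros above pass __COUNTER__ once into __instrumentation_begin()/__instrumentation_end() and build the label name with __stringify(), so the label definition and the ".long ... b - ." reference inside the asm agree on a single number; __COUNTER__ increments on every expansion, so expanding it separately at each use would give mismatched names. A tiny standalone illustration of the preprocessor rules involved, with hypothetical STR/STR_/EMIT names:

	#define STR_(x)		#x
	#define STR(x)		STR_(x)		/* expand x, then stringify */

	#define __EMIT(n)	STR(n) " and " STR(n)	/* same number at both uses */
	#define EMIT()		__EMIT(__COUNTER__)

	/*
	 * STR_(__COUNTER__) -> "__COUNTER__"        (no expansion before #)
	 * STR(__COUNTER__)  -> "0", "1", ...        (expanded first)
	 * EMIT()            -> e.g. "2 and 2"       (one value, used twice)
	 */
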
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 05f5554d860f..48b9b2a82767 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -171,9 +171,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}
-static inline void jump_entry_set_init(struct jump_entry *entry)
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
- entry->key |= 2;
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
}
#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2f34487e21f2..8583ed3ff344 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
@@ -146,7 +147,7 @@ static inline bool is_error_page(struct page *page)
*/
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
+#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8
@@ -265,6 +266,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
return !!map->hva;
}
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -1179,7 +1185,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+ * The index was checked originally in search_memslots. To avoid
+ * that a malicious guest builds a Spectre gadget out of e.g. page
+ * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
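
The new comment and array_index_nospec() call in __gfn_to_hva_memslot() above follow the standard Spectre-v1 hardening pattern: the offset was already bounds-checked on the architectural path, and the clamp stops the CPU from speculatively indexing past slot->npages. A minimal sketch of the same pattern for an ordinary bounds-checked table lookup (hypothetical names, not from this patch):

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/nospec.h>

	static int table_example[16];

	static int lookup_example(unsigned int idx)
	{
		if (idx >= ARRAY_SIZE(table_example))
			return -EINVAL;
		/* Clamp the index so it cannot be speculated out of bounds. */
		idx = array_index_nospec(idx, ARRAY_SIZE(table_example));
		return table_example[idx];
	}
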
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5f550eb27f81..3fcd24236793 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1397,25 +1397,28 @@ extern struct device_attribute *ata_common_sdev_attrs[];
ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
.dma_need_drain = ata_scsi_dma_need_drain, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity
-#define ATA_BASE_SHT(drv_name) \
+#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
.sdev_attrs = ata_common_sdev_attrs
#ifdef CONFIG_SATA_HOST
extern struct device_attribute *ata_ncq_sdev_attrs[];
#define ATA_NCQ_SHT(drv_name) \
- __ATA_BASE_SHT(drv_name), \
+ ATA_SUBBASE_SHT(drv_name), \
.sdev_attrs = ata_ncq_sdev_attrs, \
.change_queue_depth = ata_scsi_change_queue_depth
#endif
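
The SHT macro split above (ATA_SUBBASE_SHT carrying the queueing and slave_configure defaults, with ATA_BASE_SHT and ATA_NCQ_SHT layered on top) is plain designated-initializer composition: each macro expands to a comma-separated list of ".field = value" fragments that a driver drops into its scsi_host_template initializer. A hedged sketch of the same layering with hypothetical names:

	struct tmpl_example {
		const char *proc_name;
		int this_id;
		int can_queue;
	};

	#define EX_BASE_SHT(name)	\
		.proc_name = (name),	\
		.this_id   = -1

	#define EX_NCQ_SHT(name)	\
		EX_BASE_SHT(name),	\
		.can_queue = 32		/* extend the base fields */

	static const struct tmpl_example tmpl = {
		EX_NCQ_SHT("ex_drv"),
	};
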
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 01f251b6e36c..89b69e645ac7 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -141,7 +141,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
struct nvdimm_bus;
struct module;
-struct device;
struct nd_blk_region;
struct nd_blk_region_desc {
int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 2ec9ff5a7fff..3e726ace5c62 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -52,7 +52,7 @@ enum lockdep_lock_type {
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
*
- * Currently main class (subclass == 0) and signle depth subclass
+ * Currently main class (subclass == 0) and single depth subclass
* are cached in lockdep_map. This optimization is mainly targeting
* on rq->lock. double_rq_lock() acquires this highly competitive with
* single depth.
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
index c5a11b7458d4..68578e2019b0 100644
--- a/include/linux/mfd/mt6358/core.h
+++ b/include/linux/mfd/mt6358/core.h
@@ -6,12 +6,9 @@
#ifndef __MFD_MT6358_CORE_H__
#define __MFD_MT6358_CORE_H__
-#define MT6358_REG_WIDTH 16
-
struct irq_top_t {
int hwirq_base;
unsigned int num_int_regs;
- unsigned int num_int_bits;
unsigned int en_reg;
unsigned int en_reg_shift;
unsigned int sta_reg;
@@ -25,6 +22,7 @@ struct pmic_irq_data {
unsigned short top_int_status_reg;
bool *enable_hwirq;
bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
};
enum mt6358_irq_top_status_shift {
@@ -146,8 +144,8 @@ enum mt6358_irq_numbers {
{ \
.hwirq_base = MT6358_IRQ_##sp##_BASE, \
.num_int_regs = \
- ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \
- .num_int_bits = MT6358_IRQ_##sp##_BITS, \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
.en_reg = MT6358_##sp##_TOP_INT_CON0, \
.en_reg_shift = 0x6, \
.sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7,
+ MT6359_MISC_TOP,
+};
+
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6359_CORE_H__ */
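
A hedged sketch of how the generator macro is expected to be consumed by the PMIC interrupt code, one entry per top domain declared above; it assumes both MT6359 headers are included, and the array name is illustrative:

static const struct irq_top_t mt6359_ints[] = {
	MT6359_TOP_GEN(BUCK),
	MT6359_TOP_GEN(LDO),
	MT6359_TOP_GEN(PSC),
	MT6359_TOP_GEN(SCK),
	MT6359_TOP_GEN(BM),
	MT6359_TOP_GEN(HK),
	MT6359_TOP_GEN(AUD),
	MT6359_TOP_GEN(MISC),
};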
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2135c9695918
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 949268581b36..56f210eebc54 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -13,6 +13,7 @@
enum chip_id {
MT6323_CHIP_ID = 0x23,
MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index c3748b53bf7d..068ae1c0f0e8 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -36,6 +36,7 @@
#define RTC_AL_MASK_DOW BIT(4)
#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
/* Min, Hour, Dom... register offset to RTC_TC_SEC */
#define RTC_OFFSET_SEC 0
#define RTC_OFFSET_MIN 1
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
index a57af878fd0c..4a5966475a35 100644
--- a/include/linux/mfd/rohm-bd70528.h
+++ b/include/linux/mfd/rohm-bd70528.h
@@ -26,9 +26,7 @@ struct bd70528_data {
struct mutex rtc_timer_lock;
};
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
#define BD70528_LDO_VOLTS 0x20
#define BD70528_REG_BUCK1_EN 0x0F
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index c7ab69c87ee8..3b5f3a7db4bd 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -26,11 +26,11 @@ enum {
BD71828_REGULATOR_AMOUNT,
};
-#define BD71828_BUCK1267_VOLTS 0xEF
-#define BD71828_BUCK3_VOLTS 0x10
-#define BD71828_BUCK4_VOLTS 0x20
-#define BD71828_BUCK5_VOLTS 0x10
-#define BD71828_LDO_VOLTS 0x32
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
/* LDO6 is fixed 1.8V voltage */
#define BD71828_LDO_6_VOLTAGE 1800000
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index c0f57b0c64d9..5433c08fcc68 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H
+#include <linux/const.h>
+
/*
* min()/max()/clamp() macros must accomplish three things:
*
@@ -17,14 +19,6 @@
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
#define __no_side_effects(x, y) \
(__is_constexpr(x) && __is_constexpr(y))
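
Note that __is_constexpr() is only being relocated to <linux/const.h>, not removed; it still yields 1 for integer constant expressions without evaluating its argument. A minimal illustration under that assumption:

#include <linux/build_bug.h>
#include <linux/const.h>

static_assert(__is_constexpr(8));		/* literal: an integer constant expression */

static inline int example_is_const(int runtime_val)
{
	return __is_constexpr(runtime_val);	/* 0: a runtime value, never evaluated */
}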
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 236a7d04f891..30bb59fe970c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f8e8d7e90616..f8902bcd91e2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -542,6 +542,10 @@ struct mlx5_core_roce {
enum {
MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further device
+ * creation/deletion on driver rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
};
struct mlx5_adev {
@@ -703,6 +707,27 @@ struct mlx5_hv_vhca;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MR_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_MTT_CACHE_ENTRY,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MR_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MR_CACHE_ENTRIES];
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -731,7 +756,7 @@ struct mlx5_core_dev {
struct mutex intf_state_mutex;
unsigned long intf_state;
struct mlx5_priv priv;
- struct mlx5_profile *profile;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
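
Because the profile is now embedded rather than pointed to, readers index it directly; a minimal sketch of the resulting access pattern (the helper name is illustrative):

static inline u8 example_log_max_qp(struct mlx5_core_dev *dev)
{
	return dev->profile.log_max_qp;		/* previously dev->profile->log_max_qp */
}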
@@ -1083,18 +1108,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
-
-enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
-
/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from event queue to mlx5 consumers.
 * Optimise event queue dispatching.
@@ -1148,15 +1161,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d16eed6850e..eb86e80e4643 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1289,6 +1289,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
enum {
MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
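
The stub pair lets callers use the MPFS L2-table API unconditionally; with CONFIG_MLX5_MPFS disabled the calls simply succeed. A hedged caller sketch, with the function name and surrounding logic purely illustrative:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mpfs.h>

static int example_add_uc_mac(struct mlx5_core_dev *mdev, u8 *mac)
{
	int err;

	err = mlx5_mpfs_add_mac(mdev, mac);
	if (err)
		return err;

	/* ... program RX steering for this MAC; roll back with
	 * mlx5_mpfs_del_mac() on failure ...
	 */
	return 0;
}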
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 028f442530cf..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 322ec61d0da7..8ae31622deef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1719,6 +1719,7 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ struct page *single_page; /* Locked page to be unmapped */
};
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1766,6 +1767,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
@@ -1786,6 +1788,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_page(struct page *page) { }
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
@@ -3216,5 +3219,37 @@ void mem_dump_obj(void *object);
static inline void mem_dump_obj(void *object) {}
#endif
+/**
+ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
+ * the vma flags. Return 0 if the check passes, or <0 on error.
+ */
+static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+{
+ if (seals & F_SEAL_FUTURE_WRITE) {
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * "future write" seal active.
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return -EPERM;
+
+ /*
+ * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+ * MAP_SHARED and read-only, take care to not allow mprotect to
+ * revert protections on such mappings. Do this only for shared
+ * mappings. For private mappings, don't need to mask
+ * VM_MAYWRITE as we still want them to be COW-writable.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ vma->vm_flags &= ~(VM_MAYWRITE);
+ }
+
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
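
A hedged sketch of how a memfd-style ->mmap() handler would use the new helper; the info structure, its seals field and the vm_ops are hypothetical placeholders, not from this patch:

#include <linux/fs.h>
#include <linux/mm.h>

struct example_info {					/* hypothetical backing-object state */
	int seals;
};

static const struct vm_operations_struct example_vm_ops;	/* hypothetical */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_info *info = file->private_data;
	int ret;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	file_accessed(file);
	vma->vm_ops = &example_vm_ops;
	return 0;
}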
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6613b26a8894..8f0fb62e8975 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -97,10 +97,10 @@ struct page {
};
struct { /* page_pool used by netstack */
/**
- * @dma_addr: might require a 64-bit value even on
+ * @dma_addr: might require a 64-bit value on
* 32-bit architectures.
*/
- dma_addr_t dma_addr;
+ unsigned long dma_addr[2];
};
struct { /* slab, slob and slub */
union {
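
With dma_addr now two unsigned longs, 32-bit kernels that use a 64-bit dma_addr_t have to split the address across both words. A hedged sketch of the reassembly (the accessor name is illustrative; the page_pool code provides its own accessors). The double 16-bit shift avoids a shift wider than the type when dma_addr_t is only 32 bits:

static inline dma_addr_t example_page_dma_addr(struct page *page)
{
	dma_addr_t addr = page->dma_addr[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		addr |= (dma_addr_t)page->dma_addr[1] << 16 << 16;

	return addr;
}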
@@ -445,13 +445,6 @@ struct mm_struct {
*/
atomic_t has_pinned;
- /**
- * @write_protect_seq: Locked when any thread is write
- * protecting pages mapped by this mm to enforce a later COW,
- * for instance during page table copying for fork().
- */
- seqcount_t write_protect_seq;
-
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
@@ -460,6 +453,18 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
+ /*
+ * With some kernel config, the current mmap_lock's offset
+ * inside 'mm_struct' is at 0x120, which is very optimal, as
+ * its two hot fields 'count' and 'owner' sit in 2 different
+ * cachelines, and when mmap_lock is highly contended, both
+ * of the 2 fields will be accessed frequently, current layout
+ * will help to reduce cache bouncing.
+ *
+ * So please be careful with adding new fields before
+ * mmap_lock, which can easily push the 2 fields into one
+ * cacheline.
+ */
struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -480,7 +485,15 @@ struct mm_struct {
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
+
spinlock_t arg_lock; /* protect the below fields */
+
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index f9ad35dd6012..74e6c0624d27 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -139,6 +139,8 @@ struct sd_scr {
unsigned char cmds;
#define SD_SCR_CMD20_SUPPORT (1<<0)
#define SD_SCR_CMD23_SUPPORT (1<<1)
+#define SD_SCR_CMD48_SUPPORT (1<<2)
+#define SD_SCR_CMD58_SUPPORT (1<<3)
};
struct sd_ssr {
@@ -189,6 +191,25 @@ struct sd_switch_caps {
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
};
+struct sd_ext_reg {
+ u8 fno;
+ u8 page;
+ u16 offset;
+ u8 rev;
+ u8 feature_enabled;
+ u8 feature_support;
+/* Power Management Function. */
+#define SD_EXT_POWER_OFF_NOTIFY (1<<0)
+#define SD_EXT_POWER_SUSTENANCE (1<<1)
+#define SD_EXT_POWER_DOWN_MODE (1<<2)
+/* Performance Enhancement Function. */
+#define SD_EXT_PERF_FX_EVENT (1<<0)
+#define SD_EXT_PERF_CARD_MAINT (1<<1)
+#define SD_EXT_PERF_HOST_MAINT (1<<2)
+#define SD_EXT_PERF_CACHE (1<<3)
+#define SD_EXT_PERF_CMD_QUEUE (1<<4)
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -290,6 +311,8 @@ struct mmc_card {
struct sd_scr scr; /* extra SD information */
struct sd_ssr ssr; /* yet more SD information */
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+ struct sd_ext_reg ext_power; /* SD extension reg for PM */
+ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c7e7b43600e9..0abd47e9ef9b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -632,6 +632,6 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
}
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 2236aa540faa..6727576a8755 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -29,6 +29,10 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
+ /* class 11 */
+#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
+#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
+
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6bb92f26833e..6988956b8492 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -170,6 +170,28 @@ struct spinand_op;
struct spinand_device;
#define SPINAND_MAX_ID_LEN 4
+/*
+ * For erase, write and read operations, we have the following timings:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay, and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
/**
* struct spinand_id - SPI NAND id structure
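
As a worked check of the derivation in the comment above: the minimum tPROG of 300us divided by 4 and by 20 gives exactly the write delays defined here.

#include <linux/build_bug.h>

static_assert(300 / 4 == SPINAND_WRITE_INITIAL_DELAY_US);	/* 75us */
static_assert(300 / 20 == SPINAND_WRITE_POLL_DELAY_US);		/* 15us */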
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a4bd41128bf3..0f1b34dbf3a2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -516,7 +516,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}
/*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
@@ -535,15 +535,16 @@ static inline pgoff_t page_to_index(struct page *page)
return pgoff;
}
+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
+ if (unlikely(PageHuge(page)))
+ return hugetlb_basepage_index(page);
return page_to_index(page);
}
@@ -997,9 +998,9 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(struct readahead_control *rac)
{
- return (loff_t)rac->_nr_pages * PAGE_SIZE;
+ return rac->_nr_pages * PAGE_SIZE;
}
/**
@@ -1024,7 +1025,7 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
-static inline loff_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c20211e59a57..24306504226a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */
static inline struct device_node *
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4c3fa5293d76..4bac1831de80 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -555,6 +555,7 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -631,6 +632,8 @@
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+#define PCI_SUBVENDOR_ID_DELL 0x1028
+
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
#define PCI_DEVICE_ID_MATROX_MIL 0x0519
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 46b13780c2c8..a43047b1030d 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 * To distinguish it from the pte_mkyoung macro, this macro is used on platforms
 * where software maintains the page access bit.
*/
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif
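The #ifndef fallback above lets architectures that track the accessed bit in software override pte_sw_mkyoung(), while everyone else gets a no-op. An illustrative override, assumed to live in an arch's asm/pgtable.h and not part of this diff:

	/* Arch override sketch: actually mark the PTE young, since software
	 * is responsible for maintaining the accessed bit on this platform.
	 */
	static inline pte_t pte_sw_mkyoung(pte_t pte)
	{
		return pte_mkyoung(pte);
	}
	#define pte_sw_mkyoung	pte_sw_mkyoung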
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 60d2b26026a2..852743f07e3e 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -496,6 +496,11 @@ struct macsec_ops;
 * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
+ * Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * Bits [23:16] are currently reserved for future use.
+ * Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
* @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
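The expanded comment partitions phydev->dev_flags between driver-private and generic bits. A hedged sketch of a driver-private quirk living in the low half (all names are assumptions):

	#define MY_PHY_QUIRK_NO_EEE	BIT(0)	/* driver-private, bits [15:0] */

	static int my_phy_config_init(struct phy_device *phydev)
	{
		/* Board code or the MAC driver may have set the quirk bit */
		if (phydev->dev_flags & MY_PHY_QUIRK_NO_EEE)
			return my_phy_disable_eee(phydev);	/* hypothetical */
		return 0;
	}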
diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h
deleted file mode 100644
index 81a388ff58cc..000000000000
--- a/include/linux/platform_data/spi-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#ifndef _ATH79_SPI_PLATFORM_H
-#define _ATH79_SPI_PLATFORM_H
-
-struct ath79_spi_platform_data {
- unsigned bus_num;
- unsigned num_chipselect;
-};
-
-#endif /* _ATH79_SPI_PLATFORM_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index fafc1beea504..9837fb011f2f 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -50,6 +50,7 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
diff --git a/include/linux/pm.h b/include/linux/pm.h
index c9657408fee1..1d8209c09686 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -601,6 +601,7 @@ struct dev_pm_info {
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
+ unsigned int needs_force_resume:1;
unsigned int runtime_auto:1;
bool ignore_children:1;
unsigned int no_callbacks:1;
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 12cbbf305969..fa9f08164c36 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -43,6 +43,36 @@
*/
#define PMBUS_NO_CAPABILITY BIT(2)
+/*
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+ *
+ * Some PMBus chips end up in an undefined state when trying to read an
+ * unsupported register. For such chips, it is necessary to reset the
+ * chip pmbus controller to a known state after a failed register check.
+ * This can be done by reading a known register. By setting this flag the
+ * driver will try to read the STATUS register after each failed
+ * register check. This read may fail, but it will put the chip in a
+ * known state.
+ */
+#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
+
+/*
+ * PMBUS_NO_WRITE_PROTECT
+ *
+ * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use the WRITE_PROTECT command to determine its behavior.
+ */
+#define PMBUS_NO_WRITE_PROTECT BIT(4)
+
+/*
+ * PMBUS_USE_COEFFICIENTS_CMD
+ *
+ * When this flag is set the PMBus core driver will use the COEFFICIENTS
+ * register to initialize the coefficients for the direct mode format.
+ */
+#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
+
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
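The new flags land in the same flags word shown just above; a client or board file would OR them together. A hedged sketch (the chip and which flags it actually needs are hypothetical):

	/* Sketch: ask the PMBus core to read STATUS after a failed register
	 * check and to skip the WRITE_PROTECT probe for this (made-up) chip.
	 */
	static struct pmbus_platform_data my_pmbus_pdata = {
		.flags = PMBUS_READ_STATUS_AFTER_FAILED_CHECK |
			 PMBUS_NO_WRITE_PROTECT,
	};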
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d47fd33b228..51d7f1b8b32a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
* @ppm: Parts per million, but with a 16 bit binary fractional field
*/
-extern s32 scaled_ppm_to_ppb(long ppm);
+extern long scaled_ppm_to_ppb(long ppm);
/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 7f73b26ed22e..a3fec2de512f 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@@ -11,8 +11,8 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
-#ifndef __LINUX_SSP_H
-#define __LINUX_SSP_H
+#ifndef __LINUX_PXA2XX_SSP_H
+#define __LINUX_PXA2XX_SSP_H
#include <linux/bits.h>
#include <linux/compiler_types.h>
@@ -38,7 +38,6 @@ struct device_node;
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
-#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -60,7 +59,7 @@ struct device_node;
/* PXA27x, PXA3xx */
#define SSCR0_EDSS BIT(20) /* Extended data size select */
#define SSCR0_NCS BIT(21) /* Network clock select */
-#define SSCR0_RIM BIT(22) /* Receive FIFO overrrun interrupt mask */
+#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
@@ -105,6 +104,9 @@ struct device_node;
#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+/* Intel Quark X1000 */
+#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
+
/* QUARK_X1000 SSCR0 bit definition */
#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
@@ -124,7 +126,7 @@ struct device_node;
#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -181,6 +183,21 @@ struct device_node;
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
+/* Intel Merrifield SSP */
+#define SFIFOL 0x68 /* FIFO level */
+#define SFIFOTT 0x6c /* FIFO trigger threshold */
+
+#define RX_THRESH_MRFLD_DFLT 16
+#define TX_THRESH_MRFLD_DFLT 16
+
+#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
+#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
+
+#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
+#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
+#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
+#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
+
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
@@ -203,8 +220,10 @@ enum pxa_ssp_type {
MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MRFLD_SSP,
QUARK_X1000_SSP,
- LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
@@ -252,6 +271,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
+static inline void pxa_ssp_enable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
+static inline void pxa_ssp_disable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@@ -270,4 +305,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
-#endif
+#endif /* __LINUX_PXA2XX_SSP_H */
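The new pxa_ssp_enable()/pxa_ssp_disable() helpers centralize toggling the SSE bit in SSCR0 instead of open-coded read-modify-write sequences in the SPI and audio users. A hedged usage sketch (the reconfiguration step in the middle is illustrative):

	/* Quiesce the port, reprogram it, then switch it back on */
	pxa_ssp_disable(ssp);
	pxa_ssp_write_reg(ssp, SSCR1, new_sscr1);	/* illustrative */
	pxa_ssp_enable(ssp);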
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index fd80fab663a9..bebc911161b6 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -38,7 +38,7 @@ void *__builtin_alloca(size_t size);
u32 offset = raw_cpu_read(kstack_offset); \
u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \
/* Keep allocation even after "ptr" loses scope. */ \
- asm volatile("" : "=o"(*ptr) :: "memory"); \
+ asm volatile("" :: "r"(ptr) : "memory"); \
} \
} while (0)
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 3734cd8f38a8..af907a3d68d1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -79,6 +79,7 @@ extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
+void hw_protection_shutdown(const char *reason, int ms_until_forced);
/*
* Emergency restart, callable from an interrupt handler.
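hw_protection_shutdown() gives drivers a way to request an orderly shutdown when a hardware protection limit trips, with a deadline after which the shutdown is forced. A hedged usage sketch (the reason string and the 100 ms budget are illustrative):

	/* Sketch: a hwmon or regulator driver reacting to a critical alarm */
	if (temperature_is_critical)
		hw_protection_shutdown("Temperature too high", 100);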
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f87a11a5cc4a..f5f08dd0a116 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -27,6 +27,7 @@ struct device_node;
struct i2c_client;
struct i3c_device;
struct irq_domain;
+struct mdio_device;
struct slim_device;
struct spi_device;
struct spmi_device;
@@ -502,6 +503,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* DEFAULT, BIG is assumed.
* @max_raw_read: Max raw read size that can be used on the bus.
* @max_raw_write: Max raw write size that can be used on the bus.
+ * @free_on_exit: kfree this on exit of regmap
*/
struct regmap_bus {
bool fast_io;
@@ -519,6 +521,7 @@ struct regmap_bus {
enum regmap_endian val_format_endian_default;
size_t max_raw_read;
size_t max_raw_write;
+ bool free_on_exit;
};
/*
@@ -538,6 +541,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -594,6 +601,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -698,6 +709,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* regmap_init_sccb() - Initialise register map
*
* @i2c: Device that will be interacted with
@@ -889,6 +913,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* devm_regmap_init_sccb() - Initialise managed register map
*
* @i2c: Device that will be interacted with
@@ -1411,6 +1449,7 @@ struct regmap_irq_sub_irq_map {
* @not_fixed_stride: Used when chip peripherals are not laid out with fixed
* stride. Must be used with sub_reg_offsets containing the
* offsets to each peripheral.
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@@ -1463,6 +1502,7 @@ struct regmap_irq_chip {
bool type_in_mask:1;
bool clear_on_unmask:1;
bool not_fixed_stride:1;
+ bool status_invert:1;
int num_regs;
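With the MDIO support added above, a driver for an MDIO-attached device can wrap its register accesses in a regmap. A hedged probe sketch; the 5-bit register / 16-bit value layout matches clause-22 MDIO, but the config values and names are assumptions, not mandated by this diff:

	static const struct regmap_config my_mdio_regmap_cfg = {
		.reg_bits = 5,		/* clause-22 register address */
		.val_bits = 16,		/* 16-bit MDIO registers */
	};

	static int my_mdio_probe(struct mdio_device *mdiodev)
	{
		struct regmap *map;

		map = devm_regmap_init_mdio(mdiodev, &my_mdio_regmap_cfg);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* regmap_read()/regmap_write() can now be used as usual */
		return 0;
	}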
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 20e84a84fb77..f72ca73631be 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -119,6 +119,16 @@ struct regulator_dev;
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but
+ * consumers should take a recovery action to prevent problems from
+ * escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
/*
* Regulator errors that can be queried using regulator_get_error_flags
@@ -138,6 +148,10 @@ struct regulator_dev;
#define REGULATOR_ERROR_FAIL BIT(4)
#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
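Consumers see the new *_WARN events through the ordinary regulator notifier chain, and REGULATOR_EVENT_WARN_MASK groups them so a handler can tell warnings from hard errors. A hedged sketch of a consumer callback (the recovery actions are illustrative), registered with regulator_register_notifier():

	static int my_reg_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		if (event & REGULATOR_EVENT_WARN_MASK) {
			/* Recoverable: throttle the load, log, and so on */
			return NOTIFY_OK;
		}
		if (event & REGULATOR_EVENT_OVER_CURRENT) {
			/* Hard error: prepare for the supply going away */
		}
		return NOTIFY_OK;
	}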
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 5f86824bd117..73291f280a23 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -52,7 +52,6 @@ struct regulator_coupler {
#ifdef CONFIG_REGULATOR
int regulator_coupler_register(struct regulator_coupler *coupler);
-const char *rdev_get_name(struct regulator_dev *rdev);
int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state);
@@ -69,10 +68,6 @@ static inline int regulator_coupler_register(struct regulator_coupler *coupler)
{
return 0;
}
-static inline const char *rdev_get_name(struct regulator_dev *rdev)
-{
- return NULL;
-}
static inline int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4ea520c248e9..4aec20387857 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,15 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
+enum regulator_detection_severity {
+ /* Hardware shut down voltage outputs if condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
+};
+
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
{ \
@@ -78,8 +87,25 @@ enum regulator_status {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ * REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ * REGULATOR_SEVERITY_ERR should indicate that over-current situation is
+ * caused by an unrecoverable error but HW does not perform
+ * automatic shut down.
+ * REGULATOR_SEVERITY_WARN should indicate a situation where the hardware
+ * is still believed to be undamaged but a board-specific recovery
+ * action is needed. If lim_uA is 0 the limit should not be changed
+ * but the detection should just be enabled/disabled as requested.
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ * voltage situation detection. Detection can be configured for same
+ * severities as over current protection.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ * under voltage situation detection.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ * temperature situation detection.
*
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
@@ -143,8 +169,15 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
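The reworked protection callbacks take a limit, a severity and an enable flag, and a driver only wires up the severities its hardware can signal. A hedged sketch (register names and the limit-setting helper are hypothetical; the severity and lim_uA handling follows the kerneldoc above):

	static int my_set_over_current_protection(struct regulator_dev *rdev,
						  int lim_uA, int severity,
						  bool enable)
	{
		int ret;

		/* This example hardware can only shut itself down (PROT) */
		if (severity != REGULATOR_SEVERITY_PROT)
			return -EOPNOTSUPP;

		if (lim_uA) {
			ret = my_hw_set_ocp_limit(rdev, lim_uA);	/* hypothetical */
			if (ret)
				return ret;
		}
		/* lim_uA == 0: keep the current limit, only toggle detection */
		return regmap_update_bits(rdev->regmap, MY_OCP_REG,	/* hypothetical */
					  MY_OCP_EN, enable ? MY_OCP_EN : 0);
	}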
@@ -413,6 +446,128 @@ struct regulator_config {
struct gpio_desc *ena_gpiod;
};
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator which status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Amount of associated regulators.
+ * @data: Driver data pointer given at regulator_irq_desc.
+ * @opaque: Value storage for the IC driver. The core does not update this.
+ * ICs may want to store the status register value here at map_event and
+ * compare the contents in the 'renable' callback to see if new problems
+ * have been added to the status. If that is the case it may be
+ * desirable to return REGULATOR_ERROR_CLEARED and not
+ * REGULATOR_ERROR_ON, to allow the IRQ to fire again and to generate
+ * notifications also for the new issues.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ *
+ * @name: The visible name for the IRQ
+ * @fatal_cnt: If this IRQ is used to signal a HW damaging condition it may
+ * be best to shut down the regulator(s) or reboot the SoC if error
+ * handling keeps failing. If fatal_cnt is given, the IRQ handling
+ * is aborted after it fails fatal_cnt times and the die() callback
+ * (if populated) or BUG() is called to try to prevent further
+ * damage.
+ * @reread_ms: The delay before the worker attempts to re-read the status
+ * if reading the IC failed. The re-read is done immediately if no
+ * time is specified.
+ * @irq_off_ms: The time for which the IRQ is kept disabled before the status
+ * is re-evaluated, for devices which keep the IRQ disabled for the
+ * duration of the error. If this is not given the IRQ is left
+ * enabled and renable is not called.
+ * @skip_off: If set to true the IRQ handler will attempt to check if any of
+ * the associated regulators are enabled prior to taking other
+ * actions. If no regulators are enabled and this is set to true
+ * a spurious IRQ is assumed and IRQ_NONE is returned.
+ * @high_prio: Boolean to indicate that high priority WQ should be used.
+ * @data: Driver private data pointer which will be passed as such to
+ * the renable, map_event and die callbacks in regulator_irq_data.
+ * @die: Protection callback. If IC status reading or recovery actions
+ * fail fatal_cnt times this callback or BUG() is called. This
+ * callback should implement a final protection attempt like
+ * disabling the regulator. If protection succeeded this may
+ * return 0. If anything else is returned the core assumes final
+ * protection failed and calls BUG() as a last resort.
+ * @map_event: Driver callback to map IRQ status into regulator devices with
+ * events / errors. NOTE: callback MUST initialize both the
+ * errors and notifs for all rdevs which it signals having
+ * active events as core does not clean the map data.
+ * REGULATOR_FAILED_RETRY can be returned to indicate that the
+ * status reading from IC failed. If this is repeated for
+ * fatal_cnt times the core will call die() callback or BUG()
+ * as a last resort to protect the HW.
+ * @renable: Optional callback to check status (if HW supports that) before
+ * re-enabling IRQ. If implemented this should clear the error
+ * flags so that errors fetched by regulator_get_error_flags()
+ * are updated. If callback is not implemented then errors are
+ * assumed to be cleared and IRQ is re-enabled.
+ * REGULATOR_FAILED_RETRY can be returned to
+ * indicate that the status reading from IC failed. If this is
+ * repeated for 'fatal_cnt' times the core will call die()
+ * callback or BUG() as a last resort to protect the HW.
+ * Returning zero indicates that the problem in HW has been solved
+ * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
+ * indicates the error condition is still active and keeps IRQ
+ * disabled. Please note that returning REGULATOR_ERROR_ON does
+ * not retrigger evaluating what events are active or resending
+ * notifications. If this is needed you probably want to return
+ * zero and allow IRQ to retrigger causing events to be
+ * re-evaluated and re-sent.
+ *
+ * This structure is used for registering regulator IRQ notification helper.
+ */
+struct regulator_irq_desc {
+ const char *name;
+ int irq_flags;
+ int fatal_cnt;
+ int reread_ms;
+ int irq_off_ms;
+ bool skip_off;
+ bool high_prio;
+ void *data;
+
+ int (*die)(struct regulator_irq_data *rid);
+ int (*map_event)(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
+ int (*renable)(struct regulator_irq_data *rid);
+};
+
+/*
+ * Return values for regulator IRQ helpers.
+ */
+enum {
+ REGULATOR_ERROR_CLEARED,
+ REGULATOR_FAILED_RETRY,
+ REGULATOR_ERROR_ON,
+};
+
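map_event() is where a driver translates its raw interrupt status into per-regulator errors and notifications. A hedged sketch for a chip with a single associated regulator (the status read and bit names are hypothetical; the rid bookkeeping follows the kerneldoc above):

	static int my_map_event(int irq, struct regulator_irq_data *rid,
				unsigned long *dev_mask)
	{
		struct regulator_err_state *state = &rid->states[0];
		unsigned int status;

		if (my_read_status(rid->data, &status))		/* hypothetical */
			return REGULATOR_FAILED_RETRY;

		state->notifs = 0;
		state->errors = 0;
		if (status & MY_OCP_WARN_BIT) {			/* hypothetical */
			state->notifs |= REGULATOR_EVENT_OVER_CURRENT_WARN;
			state->errors |= REGULATOR_ERROR_OVER_CURRENT_WARN;
		}

		*dev_mask = state->notifs ? BIT(0) : 0;
		return 0;
	}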
/*
* struct coupling_desc
*
@@ -477,6 +632,9 @@ struct regulator_dev {
/* time when this regulator was disabled last time */
ktime_t last_off;
+ int cached_err;
+ bool use_cached_err;
+ spinlock_t err_lock;
};
struct regulator_dev *
@@ -491,6 +649,16 @@ void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs, struct regulator_dev **rdev,
+ int rdev_amount);
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount);
+void regulator_irq_helper_cancel(void **handle);
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
@@ -540,6 +708,7 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
/*
* Helper functions intended to be used by regulator drivers prior registering
@@ -550,4 +719,14 @@ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
int regulator_desc_list_voltage_linear(const struct regulator_desc *desc,
unsigned int selector);
+
+#ifdef CONFIG_REGULATOR
+const char *rdev_get_name(struct regulator_dev *rdev);
+#else
+static inline const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 8a56f033b6cd..68b4a514a410 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -83,6 +83,14 @@ struct regulator_state {
bool changeable;
};
+#define REGULATOR_NOTIF_LIMIT_DISABLE -1
+#define REGULATOR_NOTIF_LIMIT_ENABLE -2
+struct notification_limit {
+ int prot;
+ int err;
+ int warn;
+};
+
/**
* struct regulation_constraints - regulator operating constraints.
*
@@ -100,6 +108,11 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @over_curr_limits: Limits for acting on over current.
+ * @over_voltage_limits: Limits for acting on over voltage.
+ * @under_voltage_limits: Limits for acting on under voltage.
+ * @temp_limits: Limits for acting on over temperature.
+ *
* @max_spread: Max possible spread between coupled regulators
* @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
@@ -116,6 +129,11 @@ struct regulator_state {
* @pull_down: Enable pull down when regulator is disabled.
* @over_current_protection: Auto disable on over current event.
*
+ * @over_current_detection: Configure over current limits.
+ * @over_voltage_detection: Configure over voltage limits.
+ * @under_voltage_detection: Configure under voltage limits.
+ * @over_temp_detection: Configure over temperature limits.
+ *
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
* @state_disk: State for regulator when system is suspended in disk mode.
@@ -172,6 +190,10 @@ struct regulation_constraints {
struct regulator_state state_disk;
struct regulator_state state_mem;
struct regulator_state state_standby;
+ struct notification_limit over_curr_limits;
+ struct notification_limit over_voltage_limits;
+ struct notification_limit under_voltage_limits;
+ struct notification_limit temp_limits;
suspend_state_t initial_state; /* suspend state to set at init */
/* mode to set on startup */
@@ -193,6 +215,10 @@ struct regulation_constraints {
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
unsigned over_current_protection:1; /* auto disable on over current */
+ unsigned over_current_detection:1; /* notify on over current */
+ unsigned over_voltage_detection:1; /* notify on over voltage */
+ unsigned under_voltage_detection:1; /* notify on under voltage */
+ unsigned over_temp_detection:1; /* notify on over temperature */
};
/**
diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h
new file mode 100644
index 000000000000..6d6e5a58f482
--- /dev/null
+++ b/include/linux/regulator/mt6359-regulator.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6359_H
+#define __LINUX_REGULATOR_MT6359_H
+
+enum {
+ MT6359_ID_VS1 = 0,
+ MT6359_ID_VGPU11,
+ MT6359_ID_VMODEM,
+ MT6359_ID_VPU,
+ MT6359_ID_VCORE,
+ MT6359_ID_VS2,
+ MT6359_ID_VPA,
+ MT6359_ID_VPROC2,
+ MT6359_ID_VPROC1,
+ MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VAUD18 = 10,
+ MT6359_ID_VSIM1,
+ MT6359_ID_VIBR,
+ MT6359_ID_VRF12,
+ MT6359_ID_VUSB,
+ MT6359_ID_VSRAM_PROC2,
+ MT6359_ID_VIO18,
+ MT6359_ID_VCAMIO,
+ MT6359_ID_VCN18,
+ MT6359_ID_VFE28,
+ MT6359_ID_VCN13,
+ MT6359_ID_VCN33_1_BT,
+ MT6359_ID_VCN33_1_WIFI,
+ MT6359_ID_VAUX18,
+ MT6359_ID_VSRAM_OTHERS,
+ MT6359_ID_VEFUSE,
+ MT6359_ID_VXO22,
+ MT6359_ID_VRFCK,
+ MT6359_ID_VBIF28,
+ MT6359_ID_VIO28,
+ MT6359_ID_VEMC,
+ MT6359_ID_VCN33_2_BT,
+ MT6359_ID_VCN33_2_WIFI,
+ MT6359_ID_VA12,
+ MT6359_ID_VA09,
+ MT6359_ID_VRF18,
+ MT6359_ID_VSRAM_MD,
+ MT6359_ID_VUFS,
+ MT6359_ID_VM18,
+ MT6359_ID_VBBCK,
+ MT6359_ID_VSRAM_PROC1,
+ MT6359_ID_VSIM2,
+ MT6359_ID_VSRAM_OTHERS_SSHUB,
+ MT6359_ID_RG_MAX,
+};
+
+#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6359_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..8d04e7deedc6 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,6 +91,7 @@ enum ttu_flags {
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 6f155f99aa16..4ab7bfc675f1 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1109,6 +1109,7 @@ struct pcr_ops {
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2c881384517..32813c345115 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -350,11 +350,19 @@ struct load_weight {
* Only for tasks we track a moving average of the past instantaneous
* estimated utilization. This allows to absorb sporadic drops in utilization
* of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB of util_est.enqueued at dequeue
+ * time. Since the maximum value of util_est.enqueued for a task is 1024
+ * (the PELT util_avg of a task), it is safe to use the MSB.
*/
struct util_est {
unsigned int enqueued;
unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
} __attribute__((__aligned__(sizeof(u64))));
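Because util_est.enqueued of a task never exceeds 1024, bit 31 is free to carry the UTIL_AVG_UNCHANGED marker, and readers only have to strip it before using the value. A hedged sketch of the intended masking (the helper name is illustrative):

	static inline unsigned int task_util_est_sketch(struct task_struct *p)
	{
		struct util_est ue = READ_ONCE(p->se.avg.util_est);

		/* Drop the synchronization flag, keep the estimate */
		return ue.enqueued & ~UTIL_AVG_UNCHANGED;
	}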
/*
@@ -989,7 +997,6 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3f6a0fcaa10c..7f4278fa21fe 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -326,6 +326,7 @@ int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f61e34fbaaea..37ded6b8fee6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -182,9 +182,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
-#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
-#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
-#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
+#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex)
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 0dbfda8d99d0..5160fd45e5ca 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -40,6 +40,7 @@ enum siginfo_layout {
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
+ SIL_FAULT_TRAPNO,
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
@@ -266,7 +267,6 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
-extern void exit_task_sigqueue_cache(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b8fc5c53ba6f..0d8e3dcb7f88 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 31f00c7f4f59..eaab121ee575 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -2,8 +2,10 @@
/*
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*/
-#ifndef __linux_pxa2xx_spi_h
-#define __linux_pxa2xx_spi_h
+#ifndef __LINUX_SPI_PXA2XX_SPI_H
+#define __LINUX_SPI_PXA2XX_SPI_H
+
+#include <linux/types.h>
#include <linux/pxa2xx_ssp.h>
@@ -12,7 +14,10 @@
struct dma_chan;
-/* device.platform_data for SSP controller devices */
+/*
+ * The platform data for SSP controller devices
+ * (resides in device.platform_data).
+ */
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
@@ -28,8 +33,11 @@ struct pxa2xx_spi_controller {
struct ssp_device ssp;
};
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
+/*
+ * The controller specific data for SPI slave devices
+ * (resides in spi_board_info.controller_data),
+ * copied to spi_device.platform_data ... mostly for
+ * DMA tuning.
*/
struct pxa2xx_spi_chip {
u8 tx_threshold;
@@ -49,4 +57,5 @@ struct pxa2xx_spi_chip {
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
-#endif
+
+#endif /* __LINUX_SPI_PXA2XX_SPI_H */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 2b65c9edc34e..85e2ff7b840d 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -250,6 +250,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* the currently mapped area), and the caller of
* spi_mem_dirmap_write() is responsible for calling it again in
* this case.
+ * @poll_status: poll the memory device status until (status & mask) == match
+ * or the timeout has expired. It fills the data buffer with the
+ * last status value.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
@@ -274,6 +277,12 @@ struct spi_controller_mem_ops {
u64 offs, size_t len, void *buf);
ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
+ int (*poll_status)(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms);
};
/**
@@ -369,6 +378,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 360a3bc767ca..97b8d12b5f2b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -299,6 +299,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
+extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+
/* use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
@@ -586,6 +588,7 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
+ struct device *dma_map_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -644,8 +647,8 @@ struct spi_controller {
int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
- u8 unused_native_cs;
- u8 max_native_cs;
+ s8 unused_native_cs;
+ s8 max_native_cs;
/* statistics */
struct spi_statistics statistics;
@@ -1108,11 +1111,6 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-extern int spi_set_cs_timing(struct spi_device *spi,
- struct spi_delay *setup,
- struct spi_delay *hold,
- struct spi_delay *inactive);
-
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
extern int spi_async_locked(struct spi_device *spi,
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index d81fe8b364d0..61b622e334ee 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -368,6 +368,8 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 4441ad667c3f..6ff9c58b3e17 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -98,9 +98,9 @@ struct ssam_device_uid {
| (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \
.domain = d, \
.category = cat, \
- .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \
- .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \
- .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \
+ .target = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0), \
+ .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0), \
+ .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0)
/**
* SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
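The switch from ?: to __builtin_choose_expr is presumably about constant-conversion warnings: with a ternary both arms are type-checked against the u8 field, so the 16-bit wildcard constant warns even in the arm that is never taken, whereas __builtin_choose_expr discards the unused expression entirely. A minimal stand-alone illustration, not taken from this diff:

	#define ANY	0xffff
	struct id { unsigned char target; };

	#define MAKE_ID_TERNARY(tid) \
		{ .target = ((tid) != ANY) ? (tid) : 0 }
	#define MAKE_ID_CHOOSE(tid) \
		{ .target = __builtin_choose_expr((tid) != ANY, (tid), 0) }

	/* Clang may warn here (-Wconstant-conversion): the (tid) arm is dead
	 * but the ?: still converts 0xffff to unsigned char.
	 */
	struct id a = MAKE_ID_TERNARY(ANY);

	/* No warning: the unused expression is dropped before conversion */
	struct id b = MAKE_ID_CHOOSE(ANY);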
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index d9b7c9132c2f..6430a94c6981 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -23,6 +23,16 @@
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
+/* Clear all flags but only keep swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ return pte;
+}
+
/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- if (pte_swp_soft_dirty(pte))
- pte = pte_swp_clear_soft_dirty(pte);
- if (pte_swp_uffd_wp(pte))
- pte = pte_swp_clear_uffd_wp(pte);
+ pte = pte_swp_clear_flags(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7340613c7eff..1a0ff88fa107 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
+#include <linux/rcupdate.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
__tick_nohz_task_switch();
}
+static inline void tick_nohz_user_enter_prepare(void)
+{
+ if (tick_nohz_full_cpu(smp_processor_id()))
+ rcu_nocb_flush_deferred_wakeup();
+}
+
#endif
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index bf00259493e0..96b7ff66f074 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */
#define PD_T_SOURCE_ACTIVITY 45
#define PD_T_SINK_ACTIVITY 135
-#define PD_T_SINK_WAIT_CAP 240
+#define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */
#define PD_T_PS_TRANSITION 500
#define PD_T_SRC_TRANSITION 35
#define PD_T_DRP_SNK 40
diff --git a/include/linux/usb/pd_ext_sdb.h b/include/linux/usb/pd_ext_sdb.h
index 0eb83ce19597..b517ebc8f0ff 100644
--- a/include/linux/usb/pd_ext_sdb.h
+++ b/include/linux/usb/pd_ext_sdb.h
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
#define USB_PD_EXT_SDB_EVENT_OVP BIT(3)
#define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4)
-#define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \
- USB_PD_EXT_SDB_EVENT_OTP | \
- USB_PD_EXT_SDB_EVENT_OVP)
-
#endif /* __LINUX_USB_PD_EXT_SDB_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4d668abb6391..bfaaf0b6fa76 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -135,6 +135,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller);
+void *vmalloc_no_huge(unsigned long size);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 48ecca8530ff..b655d8666f55 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
* The link_support layer is used to add any Link Layer specific
* framing.
*/
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
struct cflayer *link_support, int head_room,
struct cflayer **layer, int (**rcv_func)(
struct sk_buff *, struct net_device *,
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 2aa5e91d8457..8819ff4db35a 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
* @fcs: Specify if checksum is used in CAIF Framing Layer.
* @head_room: Head space needed by link specific protocol.
*/
-void
+int
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index 14a55e03bb3c..67cce8757175 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -9,4 +9,5 @@
#include <net/caif/caif_layer.h>
struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5224f885a99a..58c2cd417e89 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5760,7 +5760,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset);
+ u8 data_offset, bool is_amsdu);
/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -5772,7 +5772,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
}
/**
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 445b66c6eb7e..e89530d0d9c6 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5537,7 +5537,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
*
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
*
* @hw: the hardware struct of which the interfaces should be iterated over
* @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -6392,7 +6392,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
/**
* ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- * of injected frames
+ * of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
* @skb: packet injected by userspace
* @dev: the &struct device of this 802.11 device
*/
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fa5887143f0d..bdc0459a595e 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -184,6 +184,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
@@ -203,13 +206,22 @@ static inline void net_ns_get_ownership(const struct net *net,
}
static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_NET_NS */
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 51d8eb99764d..48ef7460ff30 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -157,7 +157,6 @@ enum nf_flow_flags {
NF_FLOW_HW,
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
- NF_FLOW_HW_REFRESH,
NF_FLOW_HW_PENDING,
};
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 27eeb613bb4e..0a5655e300b5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1506,16 +1506,10 @@ struct nft_trans_chain {
struct nft_trans_table {
bool update;
- u8 state;
- u32 flags;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_state(trans) \
- (((struct nft_trans_table *)trans->data)->state)
-#define nft_trans_table_flags(trans) \
- (((struct nft_trans_table *)trans->data)->flags)
struct nft_trans_elem {
struct nft_set *set;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index bd76e8e082c0..1df0f8074c9d 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 6d517a37c18b..b4b6de909c93 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
- return page->dma_addr;
+ dma_addr_t ret = page->dma_addr[0];
+ if (sizeof(dma_addr_t) > sizeof(unsigned long))
+ ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+ return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ page->dma_addr[0] = addr;
+ if (sizeof(dma_addr_t) > sizeof(unsigned long))
+ page->dma_addr[1] = upper_32_bits(addr);
}
static inline bool is_page_pool_compiled_in(void)
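The accessor pair above exists because page->dma_addr is now an array of two unsigned longs, so a 64-bit dma_addr_t still fits on 32-bit platforms; the '<< 16 << 16' form keeps the shift valid even in configs where dma_addr_t is only 32 bits wide, since that branch is compiled even when it is dead code. A hedged sketch of the intended pairing in the page_pool DMA-mapping path (simplified from what the core presumably does):

	/* When the page is mapped for the device: */
	dma_addr_t dma = dma_map_page_attrs(pool->p.dev, page, 0,
					    PAGE_SIZE << pool->p.order,
					    pool->p.dma_dir,
					    DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, dma);

	/* Later, before handing the buffer back to the device: */
	dma_sync_single_for_device(pool->p.dev,
				   page_pool_get_dma_addr(page) + pool->p.offset,
				   size, pool->p.dma_dir);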
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 255e4f4b521f..ec7823921bd2 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -709,6 +709,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->extack = extack;
}
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+{
+ struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+
+ if (tc_skb_ext)
+ memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
+ return tc_skb_ext;
+}
+#endif
+
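tc_skb_ext_alloc() wraps skb_ext_add() and zeroes the extension, so drivers restoring a tc chain after a hardware miss no longer need to remember the memset. A hedged usage sketch from a driver receive path (the chain value and error handling are illustrative):

	struct tc_skb_ext *ext = tc_skb_ext_alloc(skb);

	if (!ext)
		return -ENOMEM;
	ext->chain = miss_chain_index;	/* continue tc processing from here */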
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f5c1bee0cd6a..6d7b12cba015 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -128,12 +128,7 @@ void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
- /* NOLOCK qdisc must check 'state' under the qdisc seqlock
- * to avoid racing with dev_qdisc_reset()
- */
- if (!(q->flags & TCQ_F_NOLOCK) ||
- likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
- __qdisc_run(q);
+ __qdisc_run(q);
qdisc_run_end(q);
}
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f7a6e14491fb..1e625519ae96 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,7 @@ struct qdisc_rate_table {
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_MISSED,
};
struct qdisc_size_table {
@@ -159,8 +160,33 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
+ if (spin_trylock(&qdisc->seqlock))
+ goto nolock_empty;
+
+ /* If the MISSED flag is set, it means another thread has
+ * already set it before the second spin_trylock(), so we
+ * can return false here to avoid multiple CPUs doing the
+ * set_bit() and second spin_trylock() concurrently.
+ */
+ if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
+ return false;
+
+ /* Set the MISSED flag before the second spin_trylock():
+ * if the second spin_trylock() returns false, the CPU
+ * holding the lock will either do the dequeuing for us
+ * or see the MISSED flag after releasing the lock and
+ * reschedule net_tx_action() to do the dequeuing.
+ */
+ set_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+ /* Retry in case the other CPU did not see the new flag before
+ * it released the lock at the end of qdisc_run_end().
+ */
if (!spin_trylock(&qdisc->seqlock))
return false;
+
+nolock_empty:
WRITE_ONCE(qdisc->empty, false);
} else if (qdisc_is_running(qdisc)) {
return false;
@@ -176,8 +202,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
write_seqcount_end(&qdisc->running);
- if (qdisc->flags & TCQ_F_NOLOCK)
+ if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
+
+ if (unlikely(test_bit(__QDISC_STATE_MISSED,
+ &qdisc->state))) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ __netif_schedule(qdisc);
+ }
+ }
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
diff --git a/include/net/sock.h b/include/net/sock.h
index 42bc5e1a627f..7a7058f4f265 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
static inline void sk_set_txhash(struct sock *sk)
{
- sk->sk_txhash = net_tx_rndhash();
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
}
static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
- if (sk->sk_txhash) {
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
skb->l4_hash = 1;
- skb->hash = sk->sk_txhash;
+ skb->hash = txhash;
}
}
@@ -2231,13 +2235,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
{
if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb_orphan(skb);
skb->destructor = sock_efree;
skb->sk = sk;
+ return true;
}
+ return false;
}
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
@@ -2264,8 +2270,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
static inline int sock_error(struct sock *sk)
{
int err;
- if (likely(!sk->sk_err))
+
+ /* Avoid an atomic operation for the common case.
+ * This is racy since another cpu/thread can change sk_err under us.
+ */
+ if (likely(data_race(!sk->sk_err)))
return 0;
+
err = xchg(&sk->sk_err, 0);
return -err;
}
diff --git a/include/net/tls.h b/include/net/tls.h
index 3eccb525e8f7..8341a8d1e807 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
enum tls_context_flags {
- TLS_RX_SYNC_RUNNING = 0,
+ /* tls_device_down was called after the netdev went down, device state
+ * was released, and kTLS works in software, even though rx_conf is
+ * still TLS_HW (needed for transition).
+ */
+ TLS_RX_DEV_DEGRADED = 0,
/* Unlike RX where resync is driven entirely by the core in TX only
* the driver knows when things went out of sync, so we need the flag
* to be atomic.
@@ -266,6 +270,7 @@ struct tls_context {
/* cache cold stuff */
struct proto *sk_proto;
+ struct sock *sk;
void (*sk_destruct)(struct sock *sk);
@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 1358a0ceb4d0..0bc29c4516e7 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -81,7 +81,7 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CBP_CFP (1 << 12) /* codec clk provider & frame provider */
#define SND_SOC_DAIFMT_CBC_CFP (2 << 12) /* codec clk consumer & frame provider */
#define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */
-#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame follower */
+#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */
/* previous definitions kept for backwards-compatibility, do not use in new contributions */
#define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 0dd9171d2ad8..c0d9844befd7 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -42,6 +42,63 @@ DEFINE_EVENT(spi_controller, spi_controller_busy,
);
+TRACE_EVENT(spi_setup,
+ TP_PROTO(struct spi_device *spi, int status),
+ TP_ARGS(spi, status),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(unsigned int, bits_per_word)
+ __field(unsigned int, max_speed_hz)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi->chip_select;
+ __entry->mode = spi->mode;
+ __entry->bits_per_word = spi->bits_per_word;
+ __entry->max_speed_hz = spi->max_speed_hz;
+ __entry->status = status;
+ ),
+
+ TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d",
+ __entry->bus_num, __entry->chip_select,
+ (__entry->mode & SPI_MODE_X_MASK),
+ (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (__entry->mode & SPI_3WIRE) ? "3wire, " : "",
+ (__entry->mode & SPI_LOOP) ? "loopback, " : "",
+ __entry->bits_per_word, __entry->max_speed_hz,
+ __entry->status)
+);
+
+TRACE_EVENT(spi_set_cs,
+ TP_PROTO(struct spi_device *spi, bool enable),
+ TP_ARGS(spi, enable),
+
+ TP_STRUCT__entry(
+ __field(int, bus_num)
+ __field(int, chip_select)
+ __field(unsigned long, mode)
+ __field(bool, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->bus_num = spi->controller->bus_num;
+ __entry->chip_select = spi->chip_select;
+ __entry->mode = spi->mode;
+ __entry->enable = enable;
+ ),
+
+ TP_printk("spi%d.%d %s%s",
+ __entry->bus_num, __entry->chip_select,
+ __entry->enable ? "activate" : "deactivate",
+ (__entry->mode & SPI_CS_HIGH) ? ", cs_high" : "")
+);
+
DECLARE_EVENT_CLASS(spi_message,
TP_PROTO(struct spi_message *msg),
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 03d6f6d2c1fe..5a3c221f4c9d 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -63,9 +63,6 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#ifdef __ia64__
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see ia64 si_flags */
@@ -75,6 +72,8 @@ union __sifields {
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -92,7 +91,10 @@ union __sifields {
__u32 _pkey;
} _addr_pkey;
/* used when si_code=TRAP_PERF */
- unsigned long _perf;
+ struct {
+ unsigned long _data;
+ __u32 _type;
+ } _perf;
};
} _sigfault;
@@ -150,14 +152,13 @@ typedef struct siginfo {
#define si_int _sifields._rt._sigval.sival_int
#define si_ptr _sifields._rt._sigval.sival_ptr
#define si_addr _sifields._sigfault._addr
-#ifdef __ARCH_SI_TRAPNO
#define si_trapno _sifields._sigfault._trapno
-#endif
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
#define si_pkey _sifields._sigfault._addr_pkey._pkey
-#define si_perf _sifields._sigfault._perf
+#define si_perf_data _sifields._sigfault._perf._data
+#define si_perf_type _sifields._sigfault._perf._type
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#define si_call_addr _sifields._sigsys._call_addr
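With si_perf split into si_perf_data and si_perf_type, a TRAP_PERF signal handler can recover both the user cookie and the originating event type. A hedged sketch, assuming kernel headers and a libc new enough to expose the new fields:

#include <signal.h>
#include <stdio.h>

static void perf_sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;

	if (info->si_code != TRAP_PERF)
		return;

	/* si_perf_data carries perf_event_attr::sig_data, si_perf_type the
	 * perf_event_attr::type of the event that fired. fprintf() is not
	 * async-signal-safe; it is used here only for illustration.
	 */
	fprintf(stderr, "perf event type %u fired, cookie %#lx, addr %p\n",
		info->si_perf_type, info->si_perf_data, info->si_addr);
}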
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6de5a7fc066b..d2a942086fcb 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index f44eb0a04afd..4c32e97dcdf0 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -185,7 +185,7 @@ struct fsxattr {
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
/*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
*/
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index a89eb0accd5e..235e5b2facaa 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_LOCK_PI2 13
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -32,6 +33,7 @@
#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI2_PRIVATE (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG)
#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
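FUTEX_LOCK_PI2 exists because FUTEX_LOCK_PI is hard-wired to CLOCK_REALTIME; the new opcode times out on CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is also passed. A hedged raw-syscall sketch, assuming headers that already define FUTEX_LOCK_PI2 and with error handling elided:

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_timeout)
{
	/* val, uaddr2 and val3 are unused by the PI lock operations; the
	 * timeout is an absolute CLOCK_MONOTONIC time by default.
	 */
	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG,
		       0, abs_timeout, NULL, 0);
}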
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 7d6687618d80..d1b327036ae4 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -289,6 +289,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index ee93428ced9a..225ec87d4f22 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -611,6 +611,7 @@
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index e1ae46683301..162ff99ed2cb 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -280,6 +280,7 @@ struct io_uring_params {
#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
#define IORING_FEAT_EXT_ARG (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
+#define IORING_FEAT_RSRC_TAGS (1U << 10)
/*
* io_uring_register(2) opcodes and arguments
@@ -298,8 +299,12 @@ enum {
IORING_UNREGISTER_PERSONALITY = 10,
IORING_REGISTER_RESTRICTIONS = 11,
IORING_REGISTER_ENABLE_RINGS = 12,
- IORING_REGISTER_RSRC = 13,
- IORING_REGISTER_RSRC_UPDATE = 14,
+
+ /* extended with tagging */
+ IORING_REGISTER_FILES2 = 13,
+ IORING_REGISTER_FILES_UPDATE2 = 14,
+ IORING_REGISTER_BUFFERS2 = 15,
+ IORING_REGISTER_BUFFERS_UPDATE = 16,
/* this goes last */
IORING_REGISTER_LAST
@@ -312,14 +317,10 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
-enum {
- IORING_RSRC_FILE = 0,
- IORING_RSRC_BUFFER = 1,
-};
-
struct io_uring_rsrc_register {
- __u32 type;
__u32 nr;
+ __u32 resv;
+ __u64 resv2;
__aligned_u64 data;
__aligned_u64 tags;
};
@@ -335,8 +336,8 @@ struct io_uring_rsrc_update2 {
__u32 resv;
__aligned_u64 data;
__aligned_u64 tags;
- __u32 type;
__u32 nr;
+ __u32 resv2;
};
/* Skip updating fd indexes set to this value in the fd table */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 3fd9a7e9d90c..79d9c44d1ad7 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd {
* conversion after harvesting an entry. Also, it must not skip any
* dirty bits, so that dirty bits are always harvested in sequence.
*/
-#define KVM_DIRTY_GFN_F_DIRTY BIT(0)
-#define KVM_DIRTY_GFN_F_RESET BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
#define KVM_DIRTY_GFN_F_MASK 0x3
/*
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index bf8143505c49..f92880a15645 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -464,7 +464,7 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
- * siginfo_t::si_perf, e.g. to permit user to identify the event.
+ * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
*/
__u64 sig_data;
};
diff --git a/include/uapi/linux/signalfd.h b/include/uapi/linux/signalfd.h
index 7e333042c7e3..83429a05b698 100644
--- a/include/uapi/linux/signalfd.h
+++ b/include/uapi/linux/signalfd.h
@@ -39,8 +39,6 @@ struct signalfd_siginfo {
__s32 ssi_syscall;
__u64 ssi_call_addr;
__u32 ssi_arch;
- __u32 __pad3;
- __u64 ssi_perf;
/*
	 * Pad structure to 128 bytes. Remember to update the
@@ -51,7 +49,7 @@ struct signalfd_siginfo {
* comes out of a read(2) and we really don't want to have
* a compat on read(2).
*/
- __u8 __pad[16];
+ __u8 __pad[28];
};
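Dropping the 4-byte __pad3 and the 8-byte ssi_perf frees 12 bytes, so the trailing pad grows from 16 to 28 bytes (16 + 4 + 8 = 28) and signalfd_siginfo stays exactly 128 bytes, keeping the read(2) ABI unchanged.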
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index bafbeb1a2624..650480f41f1d 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -80,8 +80,8 @@
struct uffdio_zeropage)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
-#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \
- struct uffdio_continue)
+#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
+ struct uffdio_continue)
/* read() structure */
struct uffd_msg {
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index f0c35ce8628c..4fe842c3a3a9 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -54,7 +54,7 @@
#define VIRTIO_ID_SOUND 25 /* virtio sound */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
-#define VIRTIO_ID_BT 28 /* virtio bluetooth */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_BT 40 /* virtio bluetooth */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index d3e017b5f0db..6d2d34c9f375 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -239,6 +239,39 @@ enum gaudi_engine_id {
GAUDI_ENGINE_ID_SIZE
};
+/*
+ * ASIC specific PLL index
+ *
+ * Used to retrieve in frequency info of different IPs via
+ * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be
+ * used as an index in struct hl_pll_frequency_info
+ */
+
+enum hl_goya_pll_index {
+ HL_GOYA_CPU_PLL = 0,
+ HL_GOYA_IC_PLL,
+ HL_GOYA_MC_PLL,
+ HL_GOYA_MME_PLL,
+ HL_GOYA_PCI_PLL,
+ HL_GOYA_EMMC_PLL,
+ HL_GOYA_TPC_PLL,
+ HL_GOYA_PLL_MAX
+};
+
+enum hl_gaudi_pll_index {
+ HL_GAUDI_CPU_PLL = 0,
+ HL_GAUDI_PCI_PLL,
+ HL_GAUDI_SRAM_PLL,
+ HL_GAUDI_HBM_PLL,
+ HL_GAUDI_NIC_PLL,
+ HL_GAUDI_DMA_PLL,
+ HL_GAUDI_MESH_PLL,
+ HL_GAUDI_MME_PLL,
+ HL_GAUDI_TPC_PLL,
+ HL_GAUDI_IF_PLL,
+ HL_GAUDI_PLL_MAX
+};
+
enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
diff --git a/include/xen/arm/swiotlb-xen.h b/include/xen/arm/swiotlb-xen.h
index 2994fe6031a0..33336ab58afc 100644
--- a/include/xen/arm/swiotlb-xen.h
+++ b/include/xen/arm/swiotlb-xen.h
@@ -2,6 +2,19 @@
#ifndef _ASM_ARM_SWIOTLB_XEN_H
#define _ASM_ARM_SWIOTLB_XEN_H
-extern int xen_swiotlb_detect(void);
+#include <xen/features.h>
+#include <xen/xen.h>
+
+static inline int xen_swiotlb_detect(void)
+{
+ if (!xen_domain())
+ return 0;
+ if (xen_feature(XENFEAT_direct_mapped))
+ return 1;
+ /* legacy case */
+ if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
+ return 1;
+ return 0;
+}
#endif /* _ASM_ARM_SWIOTLB_XEN_H */
diff --git a/init/Kconfig b/init/Kconfig
index 1ea12c64e4c9..a61c92066c2e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -442,6 +442,7 @@ config AUDITSYSCALL
source "kernel/irq/Kconfig"
source "kernel/time/Kconfig"
+source "kernel/bpf/Kconfig"
source "kernel/Kconfig.preempt"
menu "CPU/Task time and stats accounting"
@@ -1713,46 +1714,6 @@ config KALLSYMS_BASE_RELATIVE
# syscall, maps, verifier
-config BPF_LSM
- bool "LSM Instrumentation with BPF"
- depends on BPF_EVENTS
- depends on BPF_SYSCALL
- depends on SECURITY
- depends on BPF_JIT
- help
- Enables instrumentation of the security hooks with eBPF programs for
- implementing dynamic MAC and Audit Policies.
-
- If you are unsure how to answer this question, answer N.
-
-config BPF_SYSCALL
- bool "Enable bpf() system call"
- select BPF
- select IRQ_WORK
- select TASKS_TRACE_RCU
- select BINARY_PRINTF
- select NET_SOCK_MSG if INET
- default n
- help
- Enable the bpf() system call that allows to manipulate eBPF
- programs and maps via file descriptors.
-
-config ARCH_WANT_DEFAULT_BPF_JIT
- bool
-
-config BPF_JIT_ALWAYS_ON
- bool "Permanently enable BPF JIT and remove BPF interpreter"
- depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
- help
- Enables BPF JIT and removes BPF interpreter to avoid
- speculative execution of BPF instructions by the interpreter
-
-config BPF_JIT_DEFAULT_ON
- def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
- depends on HAVE_EBPF_JIT && BPF_JIT
-
-source "kernel/bpf/preload/Kconfig"
-
config USERFAULTFD
bool "Enable userfaultfd() system call"
depends on MMU
diff --git a/init/main.c b/init/main.c
index eb01e121d2f1..e9c42a183e33 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1537,7 +1537,7 @@ static noinline void __init kernel_init_freeable(void)
*/
set_mems_allowed(node_states[N_MEMORY]);
- cad_pid = task_pid(current);
+ cad_pid = get_pid(task_pid(current));
smp_prepare_cpus(setup_max_cpus);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 8031464ed4ae..4e4e61111500 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
struct mqueue_inode_info *info,
struct ext_wait_queue *this)
{
+ struct task_struct *task;
+
list_del(&this->list);
- get_task_struct(this->task);
+ task = get_task_struct(this->task);
/* see MQ_BARRIER for purpose/pairing */
smp_store_release(&this->state, STATE_READY);
- wake_q_add_safe(wake_q, this->task);
+ wake_q_add_safe(wake_q, task);
}
/* pipelined_send() - send a message directly to the task waiting in
diff --git a/ipc/msg.c b/ipc/msg.c
index acd1bc7af55a..6e6c8e0c9380 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
- get_task_struct(msr->r_tsk);
+ struct task_struct *r_tsk;
+
+ r_tsk = get_task_struct(msr->r_tsk);
/* see MSG_BARRIER for purpose/pairing */
smp_store_release(&msr->r_msg, ERR_PTR(res));
- wake_q_add_safe(wake_q, msr->r_tsk);
+ wake_q_add_safe(wake_q, r_tsk);
}
}
diff --git a/ipc/sem.c b/ipc/sem.c
index e0ec239680cb..bf534c74293e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -784,12 +784,14 @@ would_block:
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
struct wake_q_head *wake_q)
{
- get_task_struct(q->sleeper);
+ struct task_struct *sleeper;
+
+ sleeper = get_task_struct(q->sleeper);
/* see SEM_BARRIER_2 for purpose/pairing */
smp_store_release(&q->status, error);
- wake_q_add_safe(wake_q, q->sleeper);
+ wake_q_add_safe(wake_q, sleeper);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
new file mode 100644
index 000000000000..bd04f4a44c01
--- /dev/null
+++ b/kernel/bpf/Kconfig
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# BPF interpreter that, for example, classic socket filters depend on.
+config BPF
+ bool
+
+# Used by archs to tell that they support BPF JIT compiler plus which
+# flavour. Only one of the two can be selected for a specific arch since
+# eBPF JIT supersedes the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+ bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
+ bool
+
+# Used by archs to tell that they want the BPF JIT compiler enabled by
+# default for kernels that were compiled with BPF JIT support.
+config ARCH_WANT_DEFAULT_BPF_JIT
+ bool
+
+menu "BPF subsystem"
+
+config BPF_SYSCALL
+ bool "Enable bpf() system call"
+ select BPF
+ select IRQ_WORK
+ select TASKS_TRACE_RCU
+ select BINARY_PRINTF
+ select NET_SOCK_MSG if INET
+ default n
+ help
+	  Enable the bpf() system call that allows manipulating BPF programs
+	  and maps via file descriptors.
+
+config BPF_JIT
+ bool "Enable BPF Just In Time compiler"
+ depends on BPF
+ depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
+ depends on MODULES
+ help
+ BPF programs are normally handled by a BPF interpreter. This option
+ allows the kernel to generate native code when a program is loaded
+	  into the kernel. This will significantly speed up processing of BPF
+	  programs.
+
+	  Note, an admin should enable this feature by changing:
+ /proc/sys/net/core/bpf_jit_enable
+ /proc/sys/net/core/bpf_jit_harden (optional)
+ /proc/sys/net/core/bpf_jit_kallsyms (optional)
+
+config BPF_JIT_ALWAYS_ON
+ bool "Permanently enable BPF JIT and remove BPF interpreter"
+ depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+ help
+ Enables BPF JIT and removes BPF interpreter to avoid speculative
+ execution of BPF instructions by the interpreter.
+
+config BPF_JIT_DEFAULT_ON
+ def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
+ depends on HAVE_EBPF_JIT && BPF_JIT
+
+config BPF_UNPRIV_DEFAULT_OFF
+ bool "Disable unprivileged BPF by default"
+ depends on BPF_SYSCALL
+ help
+ Disables unprivileged BPF by default by setting the corresponding
+ /proc/sys/kernel/unprivileged_bpf_disabled knob to 2. An admin can
+	  still re-enable it by setting it to 0 later on, or permanently
+	  disable it by setting it to 1 (from which no transition back to
+	  0 is possible anymore).
+
+source "kernel/bpf/preload/Kconfig"
+
+config BPF_LSM
+ bool "Enable BPF LSM Instrumentation"
+ depends on BPF_EVENTS
+ depends on BPF_SYSCALL
+ depends on SECURITY
+ depends on BPF_JIT
+ help
+ Enables instrumentation of the security hooks with BPF programs for
+ implementing dynamic MAC and Audit Policies.
+
+ If you are unsure how to answer this question, answer N.
+
+endmenu # "BPF subsystem"
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 5efb2b24012c..da471bf01b97 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -107,10 +107,12 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_inode_storage_get_proto;
case BPF_FUNC_inode_storage_delete:
return &bpf_inode_storage_delete_proto;
+#ifdef CONFIG_NET
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
+#endif /* CONFIG_NET */
case BPF_FUNC_spin_lock:
return &bpf_spin_lock_proto;
case BPF_FUNC_spin_unlock:
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 0600ed325fa0..f982a9f0dbc4 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5206,6 +5206,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
m->ret_size = ret;
for (i = 0; i < nargs; i++) {
+ if (i == nargs - 1 && args[i].type == 0) {
+ bpf_log(log,
+ "The function %s with variable args is unsupported.\n",
+ tname);
+ return -EINVAL;
+ }
ret = __get_type_size(btf, args[i].type, &t);
if (ret < 0) {
bpf_log(log,
@@ -5213,6 +5219,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
return -EINVAL;
}
+ if (ret == 0) {
+ bpf_log(log,
+ "The function %s has malformed void argument.\n",
+ tname);
+ return -EINVAL;
+ }
m->arg_size[i] = ret;
}
m->nr_args = nargs;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 544773970dbc..a2f1f15ce432 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -14,6 +14,7 @@
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
+#include <linux/security.h>
#include "../../lib/kstrtox.h"
@@ -692,38 +693,41 @@ static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
return -EINVAL;
}
-/* Per-cpu temp buffers which can be used by printf-like helpers for %s or %p
+/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
+ * arguments representation.
*/
-#define MAX_PRINTF_BUF_LEN 512
+#define MAX_BPRINTF_BUF_LEN 512
-struct bpf_printf_buf {
- char tmp_buf[MAX_PRINTF_BUF_LEN];
+/* Support executing three nested bprintf helper calls on a given CPU */
+#define MAX_BPRINTF_NEST_LEVEL 3
+struct bpf_bprintf_buffers {
+ char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
-static DEFINE_PER_CPU(struct bpf_printf_buf, bpf_printf_buf);
-static DEFINE_PER_CPU(int, bpf_printf_buf_used);
+static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
+static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
static int try_get_fmt_tmp_buf(char **tmp_buf)
{
- struct bpf_printf_buf *bufs;
- int used;
+ struct bpf_bprintf_buffers *bufs;
+ int nest_level;
preempt_disable();
- used = this_cpu_inc_return(bpf_printf_buf_used);
- if (WARN_ON_ONCE(used > 1)) {
- this_cpu_dec(bpf_printf_buf_used);
+ nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
+ if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
+ this_cpu_dec(bpf_bprintf_nest_level);
preempt_enable();
return -EBUSY;
}
- bufs = this_cpu_ptr(&bpf_printf_buf);
- *tmp_buf = bufs->tmp_buf;
+ bufs = this_cpu_ptr(&bpf_bprintf_bufs);
+ *tmp_buf = bufs->tmp_bufs[nest_level - 1];
return 0;
}
void bpf_bprintf_cleanup(void)
{
- if (this_cpu_read(bpf_printf_buf_used)) {
- this_cpu_dec(bpf_printf_buf_used);
+ if (this_cpu_read(bpf_bprintf_nest_level)) {
+ this_cpu_dec(bpf_bprintf_nest_level);
preempt_enable();
}
}
@@ -760,7 +764,7 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
return -EBUSY;
- tmp_buf_end = tmp_buf + MAX_PRINTF_BUF_LEN;
+ tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
*bin_args = (u32 *)tmp_buf;
}
@@ -1066,11 +1070,13 @@ bpf_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return &bpf_probe_read_kernel_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return &bpf_probe_read_kernel_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_str_proto;
case BPF_FUNC_snprintf_btf:
return &bpf_snprintf_btf_proto;
case BPF_FUNC_snprintf:
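The per-CPU buffers introduced above go from a single slot to a small stack indexed by nesting level, so up to three bprintf-style helpers can nest on one CPU (for example, a helper called from a program that interrupted another one). A rough userspace analogue, with thread-local storage standing in for per-CPU data and made-up names:

#define BUF_LEN		512	/* mirrors MAX_BPRINTF_BUF_LEN */
#define MAX_NEST	3	/* mirrors MAX_BPRINTF_NEST_LEVEL */

static __thread char bufs[MAX_NEST][BUF_LEN];	/* stand-in for per-CPU data */
static __thread int nest_level;

static char *get_tmp_buf(void)
{
	if (++nest_level > MAX_NEST) {
		nest_level--;			/* nested too deeply: refuse */
		return NULL;
	}
	return bufs[nest_level - 1];	/* each nesting level gets its own buffer */
}

static void put_tmp_buf(void)
{
	if (nest_level > 0)
		nest_level--;
}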
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index f25b719ac786..84b3b35fc0d0 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
return -ENOTSUPP;
}
-static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
-{
- size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
-
- /* consumer page + producer page + 2 x data pages */
- return RINGBUF_POS_PAGES + 2 * data_pages;
-}
-
static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
struct bpf_ringbuf_map *rb_map;
- size_t mmap_sz;
rb_map = container_of(map, struct bpf_ringbuf_map, map);
- mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
-
- if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
- return -EINVAL;
+ if (vma->vm_flags & VM_WRITE) {
+ /* allow writable mapping for the consumer_pos only */
+ if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EPERM;
+ } else {
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+ /* remap_vmalloc_range() checks size and offset constraints */
return remap_vmalloc_range(vma, rb_map->rb,
vma->vm_pgoff + RINGBUF_PGOFF);
}
@@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
return NULL;
len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
+ if (len > rb->mask + 1)
+ return NULL;
+
cons_pos = smp_load_acquire(&rb->consumer_pos);
if (in_nmi()) {
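With the ringbuf_map_mmap() change only the consumer position page may be mapped writable, so user space maps that page read-write at offset 0 and the producer page plus data area read-only behind it. A hedged sketch of the raw mmap split (libbpf's ring_buffer does the equivalent internally):

#include <sys/mman.h>
#include <unistd.h>

static int map_ringbuf(int map_fd, size_t data_sz,
		       void **consumer, void **producer_and_data)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Writable mapping is only permitted for the consumer_pos page. */
	*consumer = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
			 map_fd, 0);
	if (*consumer == MAP_FAILED)
		return -1;

	/* Producer page plus the double-mapped data area, read-only. */
	*producer_and_data = mmap(NULL, page + 2 * data_sz, PROT_READ,
				  MAP_SHARED, map_fd, page);
	return *producer_and_data == MAP_FAILED ? -1 : 0;
}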
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 941ca06d9dfa..ea04b0deb5ce 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -50,7 +50,8 @@ static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);
-int sysctl_unprivileged_bpf_disabled __read_mostly;
+int sysctl_unprivileged_bpf_disabled __read_mostly =
+ IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 757476c91c98..c6a27574242d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6409,18 +6409,10 @@ enum {
};
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
- const struct bpf_reg_state *off_reg,
- u32 *alu_limit, u8 opcode)
+ u32 *alu_limit, bool mask_to_left)
{
- bool off_is_neg = off_reg->smin_value < 0;
- bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
- (opcode == BPF_SUB && !off_is_neg);
u32 max = 0, ptr_limit = 0;
- if (!tnum_is_const(off_reg->var_off) &&
- (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
- return REASON_BOUNDS;
-
switch (ptr_reg->type) {
case PTR_TO_STACK:
/* Offset 0 is out-of-bounds, but acceptable start for the
@@ -6486,15 +6478,41 @@ static bool sanitize_needed(u8 opcode)
return opcode == BPF_ADD || opcode == BPF_SUB;
}
+struct bpf_sanitize_info {
+ struct bpf_insn_aux_data aux;
+ bool mask_to_left;
+};
+
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+ const struct bpf_insn *insn,
+ u32 next_idx, u32 curr_idx)
+{
+ struct bpf_verifier_state *branch;
+ struct bpf_reg_state *regs;
+
+ branch = push_stack(env, next_idx, curr_idx, true);
+ if (branch && insn) {
+ regs = branch->frame[branch->curframe]->regs;
+ if (BPF_SRC(insn->code) == BPF_K) {
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ } else if (BPF_SRC(insn->code) == BPF_X) {
+ mark_reg_unknown(env, regs, insn->dst_reg);
+ mark_reg_unknown(env, regs, insn->src_reg);
+ }
+ }
+ return branch;
+}
+
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg,
struct bpf_reg_state *dst_reg,
- struct bpf_insn_aux_data *tmp_aux,
+ struct bpf_sanitize_info *info,
const bool commit_window)
{
- struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
struct bpf_verifier_state *vstate = env->cur_state;
bool off_is_imm = tnum_is_const(off_reg->var_off);
bool off_is_neg = off_reg->smin_value < 0;
@@ -6515,7 +6533,16 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
if (vstate->speculative)
goto do_sim;
- err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (!commit_window) {
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+ return REASON_BOUNDS;
+
+ info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+ }
+
+ err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
if (err < 0)
return err;
@@ -6523,8 +6550,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
/* In commit phase we narrow the masking window based on
* the observed pointer move after the simulated operation.
*/
- alu_state = tmp_aux->alu_state;
- alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+ alu_state = info->aux.alu_state;
+ alu_limit = abs(info->aux.alu_limit - alu_limit);
} else {
alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
@@ -6539,8 +6566,12 @@ do_sim:
/* If we're in commit phase, we're done here given we already
* pushed the truncated dst_reg into the speculative verification
* stack.
+ *
+	 * Also, when the register is a known constant, we rewrite the
+	 * register-based operation to an immediate-based one, and thus do
+	 * not need masking (and as
+ * a consequence, do not need to simulate the zero-truncation either).
*/
- if (commit_window)
+ if (commit_window || off_is_imm)
return 0;
/* Simulate and find potential out-of-bounds access under
@@ -6556,12 +6587,26 @@ do_sim:
tmp = *dst_reg;
*dst_reg = *ptr_reg;
}
- ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+ ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+ env->insn_idx);
if (!ptr_is_dst_reg && ret)
*dst_reg = tmp;
return !ret ? REASON_STACK : 0;
}
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+
+ /* If we simulate paths under speculation, we don't update the
+ * insn as 'seen' such that when we verify unreachable paths in
+ * the non-speculative domain, sanitize_dead_code() can still
+ * rewrite/sanitize them.
+ */
+ if (!vstate->speculative)
+ env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
static int sanitize_err(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int reason,
const struct bpf_reg_state *off_reg,
@@ -6685,7 +6730,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
- struct bpf_insn_aux_data tmp_aux = {};
+ struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
int ret;
@@ -6754,7 +6799,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (sanitize_needed(opcode)) {
ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
- &tmp_aux, false);
+ &info, false);
if (ret < 0)
return sanitize_err(env, insn, ret, off_reg, dst_reg);
}
@@ -6895,7 +6940,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
return -EACCES;
if (sanitize_needed(opcode)) {
ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
- &tmp_aux, true);
+ &info, true);
if (ret < 0)
return sanitize_err(env, insn, ret, off_reg, dst_reg);
}
@@ -7084,11 +7129,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
s32 smin_val = src_reg->s32_min_value;
u32 umax_val = src_reg->u32_max_value;
- /* Assuming scalar64_min_max_and will be called so its safe
- * to skip updating register for known 32-bit case.
- */
- if (src_known && dst_known)
+ if (src_known && dst_known) {
+ __mark_reg32_known(dst_reg, var32_off.value);
return;
+ }
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
@@ -7108,7 +7152,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
dst_reg->s32_min_value = dst_reg->u32_min_value;
dst_reg->s32_max_value = dst_reg->u32_max_value;
}
-
}
static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -7155,11 +7198,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
s32 smin_val = src_reg->s32_min_value;
u32 umin_val = src_reg->u32_min_value;
- /* Assuming scalar64_min_max_or will be called so it is safe
- * to skip updating register for known case.
- */
- if (src_known && dst_known)
+ if (src_known && dst_known) {
+ __mark_reg32_known(dst_reg, var32_off.value);
return;
+ }
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
@@ -7224,11 +7266,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
struct tnum var32_off = tnum_subreg(dst_reg->var_off);
s32 smin_val = src_reg->s32_min_value;
- /* Assuming scalar64_min_max_xor will be called so it is safe
- * to skip updating register for known case.
- */
- if (src_known && dst_known)
+ if (src_known && dst_known) {
+ __mark_reg32_known(dst_reg, var32_off.value);
return;
+ }
/* We get both minimum and maximum from the var32_off. */
dst_reg->u32_min_value = var32_off.value;
@@ -8744,14 +8785,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (err)
return err;
}
+
if (pred == 1) {
- /* only follow the goto, ignore fall-through */
+ /* Only follow the goto, ignore fall-through. If needed, push
+ * the fall-through branch for simulation under speculative
+ * execution.
+ */
+ if (!env->bypass_spec_v1 &&
+ !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ *insn_idx))
+ return -EFAULT;
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
- /* only follow fall-through branch, since
- * that's where the program will go
+ /* Only follow the fall-through branch, since that's where the
+ * program will go. If needed, push the goto branch for
+ * simulation under speculative execution.
*/
+ if (!env->bypass_spec_v1 &&
+ !sanitize_speculative_path(env, insn,
+ *insn_idx + insn->off + 1,
+ *insn_idx))
+ return -EFAULT;
return 0;
}
@@ -10624,7 +10679,7 @@ static int do_check(struct bpf_verifier_env *env)
}
regs = cur_regs(env);
- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+ sanitize_mark_insn_seen(env);
prev_insn_idx = env->insn_idx;
if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10851,7 +10906,7 @@ process_bpf_exit:
return err;
env->insn_idx++;
- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+ sanitize_mark_insn_seen(env);
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@@ -11360,6 +11415,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
struct bpf_insn *insn = new_prog->insnsi;
+ u32 old_seen = old_data[off].seen;
u32 prog_len;
int i;
@@ -11380,7 +11436,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++) {
- new_data[i].seen = env->pass_cnt;
+ /* Expand insni[off]'s seen count to the patched range. */
+ new_data[i].seen = old_seen;
new_data[i].zext_dst = insn_has_def32(env, insn + i);
}
env->insn_aux_data = new_data;
@@ -12704,6 +12761,9 @@ static void free_states(struct bpf_verifier_env *env)
* insn_aux_data was touched. These variables are compared to clear temporary
* data from failed pass. For testing and experiments do_check_common() can be
* run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
*/
static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
{
@@ -13200,6 +13260,17 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
return 0;
}
+BTF_SET_START(btf_id_deny)
+BTF_ID_UNUSED
+#ifdef CONFIG_SMP
+BTF_ID(func, migrate_disable)
+BTF_ID(func, migrate_enable)
+#endif
+#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
+BTF_ID(func, rcu_read_unlock_strict)
+#endif
+BTF_SET_END(btf_id_deny)
+
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
@@ -13259,6 +13330,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
ret = bpf_lsm_verify_prog(&env->log, prog);
if (ret < 0)
return ret;
+ } else if (prog->type == BPF_PROG_TYPE_TRACING &&
+ btf_id_set_contains(&btf_id_deny, btf_id)) {
+ return -EINVAL;
}
key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
@@ -13358,12 +13432,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
- if (bpf_prog_is_dev_bound(env->prog->aux)) {
- ret = bpf_prog_offload_verifier_prep(env->prog);
- if (ret)
- goto skip_full_check;
- }
-
env->explored_states = kvcalloc(state_htab_size(env),
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
@@ -13391,6 +13459,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (ret < 0)
goto skip_full_check;
+ if (bpf_prog_is_dev_bound(env->prog->aux)) {
+ ret = bpf_prog_offload_verifier_prep(env->prog);
+ if (ret)
+ goto skip_full_check;
+ }
+
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 391aa570369b..1f274d7fc934 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
struct cgroup *cgrp = kn->priv;
int ret;
+ /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
+ if (strchr(new_name_str, '\n'))
+ return -EINVAL;
+
if (kernfs_type(kn) != KERNFS_DIR)
return -ENOTDIR;
if (kn->parent != new_parent)
@@ -1001,7 +1005,7 @@ static int check_cgroupfs_options(struct fs_context *fc)
ctx->subsys_mask &= enabled;
/*
- * In absense of 'none', 'name=' or subsystem name options,
+ * In absence of 'none', 'name=' and subsystem name options,
* let's default to 'all'.
*/
if (!ctx->subsys_mask && !ctx->none && !ctx->name)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e049edd66776..21ecc6ee6a6d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -468,7 +468,7 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
*
- * Find and get @cgrp's css assocaited with @ss. If the css doesn't exist
+ * Find and get @cgrp's css associated with @ss. If the css doesn't exist
* or is offline, %NULL is returned.
*/
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
@@ -1633,7 +1633,7 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
/**
* css_clear_dir - remove subsys files in a cgroup directory
- * @css: taget css
+ * @css: target css
*/
static void css_clear_dir(struct cgroup_subsys_state *css)
{
@@ -5350,7 +5350,7 @@ out_unlock:
/*
* This is called when the refcnt of a css is confirmed to be killed.
* css_tryget_online() is now guaranteed to fail. Tell the subsystem to
- * initate destruction and put the css ref from kill_css().
+ * initiate destruction and put the css ref from kill_css().
*/
static void css_killed_work_fn(struct work_struct *work)
{
@@ -5634,8 +5634,6 @@ int __init cgroup_init_early(void)
return 0;
}
-static u16 cgroup_disable_mask __initdata;
-
/**
* cgroup_init - cgroup initialization
*
@@ -5694,12 +5692,8 @@ int __init cgroup_init(void)
* disabled flag and cftype registration needs kmalloc,
* both of which aren't available during early_init.
*/
- if (cgroup_disable_mask & (1 << ssid)) {
- static_branch_disable(cgroup_subsys_enabled_key[ssid]);
- printk(KERN_INFO "Disabling %s control group subsystem\n",
- ss->name);
+ if (!cgroup_ssid_enabled(ssid))
continue;
- }
if (cgroup1_ssid_disabled(ssid))
printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
@@ -6058,7 +6052,7 @@ out_revert:
* @kargs: the arguments passed to create the child process
*
* This calls the cancel_fork() callbacks if a fork failed *after*
- * cgroup_can_fork() succeded and cleans up references we took to
+ * cgroup_can_fork() succeeded and cleans up references we took to
* prepare a new css_set for the child process in cgroup_can_fork().
*/
void cgroup_cancel_fork(struct task_struct *child,
@@ -6214,7 +6208,10 @@ static int __init cgroup_disable(char *str)
if (strcmp(token, ss->name) &&
strcmp(token, ss->legacy_name))
continue;
- cgroup_disable_mask |= 1 << i;
+
+ static_branch_disable(cgroup_subsys_enabled_key[i]);
+ pr_info("Disabling %s control group subsystem\n",
+ ss->name);
}
}
return 1;
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index a945504c0ae7..adb5190c4429 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3376,7 +3376,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
}
/**
- * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
* @nodemask: the nodemask to be checked
*
* Are any of the nodes in the nodemask allowed in current->mems_allowed?
diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c
index ae042c347c64..3135406608c7 100644
--- a/kernel/cgroup/rdma.c
+++ b/kernel/cgroup/rdma.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(rdmacg_uncharge);
* This function follows charging resource in hierarchical way.
* It will fail if the charge would cause the new value to exceed the
* hierarchical limit.
- * Returns 0 if the charge succeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
+ * Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
* Returns pointer to rdmacg for this resource when charging is successful.
*
* Charger needs to account resources on two criteria.
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 3a3fd2993a65..cee265cb535c 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -75,7 +75,7 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
* @root: root of the tree to traversal
* @cpu: target cpu
*
- * Walks the udpated rstat_cpu tree on @cpu from @root. %NULL @pos starts
+ * Walks the updated rstat_cpu tree on @cpu from @root. %NULL @pos starts
* the traversal and %NULL return indicates the end. During traversal,
* each returned cgroup is unlinked from the tree. Must be called with the
* matching cgroup_rstat_cpu_lock held.
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 825284baaf46..684a6061a13a 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
VMCOREINFO_STRUCT_SIZE(mem_section);
VMCOREINFO_OFFSET(mem_section, section_mem_map);
+ VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
#endif
VMCOREINFO_STRUCT_SIZE(page);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 8ca7d505d61c..e50df8d8f87e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -335,6 +335,14 @@ void __init swiotlb_exit(void)
}
/*
+ * Return the offset into an iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+ return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
+/*
* Bounce: copy the swiotlb buffer from or back to the original dma location
*/
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
@@ -346,10 +354,17 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
+ unsigned int tlb_offset;
if (orig_addr == INVALID_PHYS_ADDR)
return;
+ tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+ swiotlb_align_offset(dev, orig_addr);
+
+ orig_addr += tlb_offset;
+ alloc_size -= tlb_offset;
+
if (size > alloc_size) {
dev_WARN_ONCE(dev, 1,
"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
@@ -391,14 +406,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
/*
- * Return the offset into a iotlb slot required to keep the device happy.
- */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
-{
- return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
-}
-
-/*
* Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
*/
static inline unsigned long get_max_slots(unsigned long boundary_mask)
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index a0b3b04fb596..bf16395b9e13 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
+#include <linux/tick.h>
#include "common.h"
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
local_irq_disable_exit_to_user();
/* Check if any of the above work has queued a deferred wakeup */
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
ti_work = READ_ONCE(current_thread_info()->flags);
}
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
lockdep_assert_irqs_disabled();
/* Flush pending rcuog wakeup before the last need_resched() check */
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e125ae2fa92..ea0e24040691 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4617,7 +4617,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
++ctx->pin_count;
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
return ctx;
}
@@ -6397,8 +6399,6 @@ void perf_event_wakeup(struct perf_event *event)
static void perf_sigtrap(struct perf_event *event)
{
- struct kernel_siginfo info;
-
/*
* We'd expect this to only occur if the irq_work is delayed and either
* ctx->task or current has changed in the meantime. This can be the
@@ -6413,13 +6413,8 @@ static void perf_sigtrap(struct perf_event *event)
if (current->flags & PF_EXITING)
return;
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_code = TRAP_PERF;
- info.si_errno = event->attr.type;
- info.si_perf = event->attr.sig_data;
- info.si_addr = (void __user *)event->pending_addr;
- force_sig_info(&info);
+ force_sig_perf((void __user *)event->pending_addr,
+ event->attr.type, event->attr.sig_data);
}
static void perf_pending_event_disable(struct perf_event *event)
diff --git a/kernel/exit.c b/kernel/exit.c
index fd1c04193e18..65809fac3038 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -162,7 +162,6 @@ static void __exit_signal(struct task_struct *tsk)
flush_sigqueue(&sig->shared_pending);
tty_kref_put(tty);
}
- exit_task_sigqueue_cache(tsk);
}
static void delayed_put_task_struct(struct rcu_head *rhp)
diff --git a/kernel/fork.c b/kernel/fork.c
index dc06afd725cb..a070caed5c8e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2008,7 +2008,6 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
- p->sigqueue_cache = NULL;
p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
diff --git a/kernel/futex.c b/kernel/futex.c
index 4938a00bc785..2ecb07575055 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -35,7 +35,6 @@
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
-#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
@@ -650,7 +649,7 @@ again:
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
- key->shared.pgoff = basepage_index(tail);
+ key->shared.pgoff = page_to_pgoff(tail);
rcu_read_unlock();
}
@@ -1728,12 +1727,9 @@ retry_private:
return ret;
}
- if (!(flags & FLAGS_SHARED)) {
- cond_resched();
- goto retry_private;
- }
-
cond_resched();
+ if (!(flags & FLAGS_SHARED))
+ goto retry_private;
goto retry;
}
@@ -1874,7 +1870,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
* If the caller intends to requeue more than 1 waiter to pifutex,
* force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
* as we have means to handle the possible fault. If not, don't set
- * the bit unecessarily as it will force the subsequent unlock to enter
+ * the bit unnecessarily as it will force the subsequent unlock to enter
* the kernel.
*/
top_waiter = futex_top_waiter(hb1, key1);
@@ -2103,7 +2099,7 @@ retry_private:
continue;
/*
- * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+ * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
*
* We should never be requeueing a futex_q with a pi_state,
@@ -2318,7 +2314,7 @@ retry:
}
/*
- * PI futexes can not be requeued and must remove themself from the
+ * PI futexes can not be requeued and must remove themselves from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
*/
static void unqueue_me_pi(struct futex_q *q)
@@ -2786,7 +2782,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
if (refill_pi_state_cache())
return -ENOMEM;
- to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
+ to = futex_setup_timer(time, &timeout, flags, 0);
retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
@@ -2903,7 +2899,7 @@ no_block:
*/
res = fixup_owner(uaddr, &q, !ret);
/*
- * If fixup_owner() returned an error, proprogate that. If it acquired
+ * If fixup_owner() returned an error, propagate that. If it acquired
* the lock, clear our -ETIMEDOUT or -EINTR.
*/
if (res)
@@ -3280,7 +3276,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
*/
res = fixup_owner(uaddr2, &q, !ret);
/*
- * If fixup_owner() returned an error, proprogate that. If it
+ * If fixup_owner() returned an error, propagate that. If it
* acquired the lock, clear -ETIMEDOUT or -EINTR.
*/
if (res)
@@ -3678,7 +3674,7 @@ void futex_exec_release(struct task_struct *tsk)
{
/*
* The state handling is done for consistency, but in the case of
- * exec() there is no way to prevent futher damage as the PID stays
+ * exec() there is no way to prevent further damage as the PID stays
* the same. But for the unlikely and arguably buggy case that a
* futex is held on exec(), this provides at least as much state
* consistency protection which is possible.
@@ -3710,12 +3706,14 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
if (op & FUTEX_CLOCK_REALTIME) {
flags |= FLAGS_CLOCKRT;
- if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
+ if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI &&
+ cmd != FUTEX_LOCK_PI2)
return -ENOSYS;
}
switch (cmd) {
case FUTEX_LOCK_PI:
+ case FUTEX_LOCK_PI2:
case FUTEX_UNLOCK_PI:
case FUTEX_TRYLOCK_PI:
case FUTEX_WAIT_REQUEUE_PI:
@@ -3742,6 +3740,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
case FUTEX_WAKE_OP:
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
case FUTEX_LOCK_PI:
+ flags |= FLAGS_CLOCKRT;
+ fallthrough;
+ case FUTEX_LOCK_PI2:
return futex_lock_pi(uaddr, flags, timeout, 0);
case FUTEX_UNLOCK_PI:
return futex_unlock_pi(uaddr, flags);
@@ -3762,6 +3763,7 @@ static __always_inline bool futex_cmd_has_timeout(u32 cmd)
switch (cmd) {
case FUTEX_WAIT:
case FUTEX_LOCK_PI:
+ case FUTEX_LOCK_PI2:
case FUTEX_WAIT_BITSET:
case FUTEX_WAIT_REQUEUE_PI:
return true;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 23a7a0ba1388..db8c248ebc8c 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
if (!irq_work_claim(work))
return false;
- /*record irq_work call stack in order to print it in KASAN reports*/
- kasan_record_aux_stack(work);
-
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
__irq_work_queue_local(work);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index ba39fbb1f8e7..bdb0681bece8 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (jump_entry_code(entry) <= (unsigned long)end &&
- jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+ jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
return 1;
return 0;
@@ -483,13 +483,14 @@ void __init jump_label_init(void)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
+ bool in_init;
/* rewrite NOPs */
if (jump_label_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
- if (init_section_contains((void *)jump_entry_code(iter), 1))
- jump_entry_set_init(iter);
+ in_init = init_section_contains((void *)jump_entry_code(iter), 1);
+ jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
@@ -634,9 +635,10 @@ static int jump_label_add_module(struct module *mod)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
+ bool in_init;
- if (within_module_init(jump_entry_code(iter), mod))
- jump_entry_set_init(iter);
+ in_init = within_module_init(jump_entry_code(iter), mod);
+ jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index c1dd02f3be8b..e65de172ccf7 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -266,9 +266,10 @@ static const struct file_operations debugfs_ops =
.release = single_release
};
-static void __init kcsan_debugfs_init(void)
+static int __init kcsan_debugfs_init(void)
{
debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+ return 0;
}
late_initcall(kcsan_debugfs_init);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index fe3f2a40d61e..0fccf7d0c6a1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1093,8 +1093,38 @@ void kthread_flush_work(struct kthread_work *work)
EXPORT_SYMBOL_GPL(kthread_flush_work);
/*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and can no
+ * longer manipulate the work list_head.
+ *
+ * The function is called under worker->lock. The lock is temporarily
+ * released but the timer can't be set again in the meantime.
+ */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+ unsigned long *flags)
+{
+ struct kthread_delayed_work *dwork =
+ container_of(work, struct kthread_delayed_work, work);
+ struct kthread_worker *worker = work->worker;
+
+ /*
+ * del_timer_sync() must be called to make sure that the timer
+	 * callback is not running. The lock must be temporarily released
+ * to avoid a deadlock with the callback. In the meantime,
+ * any queuing is blocked by setting the canceling counter.
+ */
+ work->canceling++;
+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
+ del_timer_sync(&dwork->timer);
+ raw_spin_lock_irqsave(&worker->lock, *flags);
+ work->canceling--;
+}
+
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
*
* The work might still be in use when this function finishes. See the
* current_work proceed by the worker.
@@ -1102,28 +1132,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
* Return: %true if @work was pending and successfully canceled,
* %false if @work was not pending
*/
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
- unsigned long *flags)
+static bool __kthread_cancel_work(struct kthread_work *work)
{
- /* Try to cancel the timer if exists. */
- if (is_dwork) {
- struct kthread_delayed_work *dwork =
- container_of(work, struct kthread_delayed_work, work);
- struct kthread_worker *worker = work->worker;
-
- /*
- * del_timer_sync() must be called to make sure that the timer
- * callback is not running. The lock must be temporary released
- * to avoid a deadlock with the callback. In the meantime,
- * any queuing is blocked by setting the canceling counter.
- */
- work->canceling++;
- raw_spin_unlock_irqrestore(&worker->lock, *flags);
- del_timer_sync(&dwork->timer);
- raw_spin_lock_irqsave(&worker->lock, *flags);
- work->canceling--;
- }
-
/*
* Try to remove the work from a worker list. It might either
* be from worker->work_list or from worker->delayed_work_list.
@@ -1176,11 +1186,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
/* Work must not be used with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker != worker);
- /* Do not fight with another command that is canceling this work. */
+ /*
+ * Temporarily cancel the work, but do not fight with another command
+ * that is canceling the work as well.
+ *
+ * It is a bit tricky because of possible races with other
+ * mod_delayed_work() and cancel_delayed_work() callers.
+ *
+ * The timer must be canceled first because worker->lock is released
+ * when doing so. But the work can be removed from the queue (list)
+ * only when it can be queued again so that the return value can
+ * be used for reference counting.
+ */
+ kthread_cancel_delayed_work_timer(work, &flags);
if (work->canceling)
goto out;
+ ret = __kthread_cancel_work(work);
- ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
@@ -1202,7 +1224,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);
- ret = __kthread_cancel_work(work, is_dwork, &flags);
+ if (is_dwork)
+ kthread_cancel_delayed_work_timer(work, &flags);
+
+ ret = __kthread_cancel_work(work);
if (worker->current_work != work)
goto out_fast;
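For illustration only (not part of the patch; a simplified, hypothetical caller), a minimal sketch of the ordering the kthread changes above establish: cancel the delayed work's timer first, while worker->lock may be temporarily dropped, and only then unlink the work so the return value still reports whether it was pending.

/* Illustrative sketch, assuming the two static helpers added above;
 * the caller holds worker->lock on entry. */
static bool cancel_then_dequeue_locked(struct kthread_work *work,
				       unsigned long *flags)
{
	/* Step 1: stop the timer; worker->lock is dropped inside. */
	kthread_cancel_delayed_work_timer(work, flags);

	/* Step 2: only now unlink the work, so the result still tells
	 * the caller whether it was pending. */
	return __kthread_cancel_work(work);
}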
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 48d736aa03b2..0c0524bfff99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -843,7 +843,7 @@ static int count_matching_names(struct lock_class *new_class)
}
/* used from NMI context -- must be lockless */
-static __always_inline struct lock_class *
+static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
@@ -851,12 +851,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
struct lock_class *class;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ instrumentation_begin();
debug_locks_off();
printk(KERN_ERR
"BUG: looking up invalid subclass: %u\n", subclass);
printk(KERN_ERR
"turning off the locking correctness validator.\n");
dump_stack();
+ instrumentation_end();
return NULL;
}
@@ -2304,7 +2306,56 @@ static void print_lock_class_header(struct lock_class *class, int depth)
}
/*
- * printk the shortest lock dependencies from @start to @end in reverse order:
+ * Dependency path printing:
+ *
+ * After BFS we get a lock dependency path (linked via ->parent of lock_list),
+ * printing out each lock in the dependency path will help on understanding how
+ * the deadlock could happen. Here are some details about dependency path
+ * printing:
+ *
+ * 1) A lock_list can be either forwards or backwards for a lock dependency,
+ * for a lock dependency A -> B, there are two lock_lists:
+ *
+ * a) lock_list in the ->locks_after list of A, whose ->class is B and
+ * ->links_to is A. In this case, we can say the lock_list is
+ * "A -> B" (forwards case).
+ *
+ * b) lock_list in the ->locks_before list of B, whose ->class is A
+ * and ->links_to is B. In this case, we can say the lock_list is
+ * "B <- A" (backwards case).
+ *
+ * The ->trace of both a) and b) point to the call trace where B was
+ * acquired with A held.
+ *
+ * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
+ * represent a certain lock dependency, it only provides an initial entry
+ * for BFS. For example, BFS may introduce a "helper" lock_list whose
+ * ->class is A, as a result BFS will search all dependencies starting with
+ * A, e.g. A -> B or A -> C.
+ *
+ * The notation of a forwards helper lock_list is like "-> A", which means
+ * we should search the forwards dependencies starting with "A", e.g A -> B
+ * or A -> C.
+ *
+ * The notation of a backwards helper lock_list is like "<- B", which means
+ * we should search the backwards dependencies ending with "B", e.g.
+ * B <- A or B <- C.
+ */
+
+/*
+ * printk the shortest lock dependencies from @root to @leaf in reverse order.
+ *
+ * We have a lock dependency path as follows:
+ *
+ * @root @leaf
+ * | |
+ * V V
+ * ->parent ->parent
+ * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
+ * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
+ *
+ * , so it's natural that we start from @leaf and print every ->class and
+ * ->trace until we reach the @root.
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
@@ -2332,6 +2383,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
} while (entry && (depth >= 0));
}
+/*
+ * printk the shortest lock dependencies from @leaf to @root.
+ *
+ * We have a lock dependency path (from a backwards search) as follows:
+ *
+ * @leaf @root
+ * | |
+ * V V
+ * ->parent ->parent
+ * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
+ * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
+ *
+ * , so when we iterate from @leaf to @root, we actually print the lock
+ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
+ *
+ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
+ * ->trace of L2 <- L1 is the call trace of L2. In fact, we don't have the call
+ * trace of L1 in the dependency path, which is alright, because most of the
+ * time we can figure out where L1 is held from the call trace of L2.
+ */
+static void __used
+print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
+ struct lock_list *root)
+{
+ struct lock_list *entry = leaf;
+ const struct lock_trace *trace = NULL;
+ int depth;
+
+ /* compute depth from generated tree by BFS */
+ depth = get_lock_depth(leaf);
+
+ do {
+ print_lock_class_header(entry->class, depth);
+ if (trace) {
+ printk("%*s ... acquired at:\n", depth, "");
+ print_lock_trace(trace, 2);
+ printk("\n");
+ }
+
+ /*
+ * Record the pointer to the trace for the next lock_list
+ * entry, see the comments for the function.
+ */
+ trace = entry->trace;
+
+ if (depth == 0 && (entry != root)) {
+ printk("lockdep:%s bad path found in chain graph\n", __func__);
+ break;
+ }
+
+ entry = get_lock_parent(entry);
+ depth--;
+ } while (entry && (depth >= 0));
+}
+
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
struct lock_list *unsafe_entry,
@@ -2446,10 +2552,7 @@ print_bad_irq_dependency(struct task_struct *curr,
lockdep_print_held_locks(curr);
pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
- prev_root->trace = save_trace();
- if (!prev_root->trace)
- return;
- print_shortest_lock_dependencies(backwards_entry, prev_root);
+ print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
pr_warn("\nthe dependencies between the lock to be acquired");
pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
@@ -2667,8 +2770,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
* Step 3: we found a bad match! Now retrieve a lock from the backward
* list whose usage mask matches the exclusive usage mask from the
* lock found on the forward list.
+ *
+ * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
+ * the following case:
+ *
+ * When trying to add A -> B to the graph, we find that there is a
+ * hardirq-safe L, such that L -> ... -> A, and another hardirq-unsafe M,
+ * such that B -> ... -> M. However, M is **softirq-safe**: if we used the
+ * exact inverted bits of M's usage_mask, we would find another lock N that
+ * is **softirq-unsafe** with N -> ... -> A, yet N -> ... -> M would not
+ * cause an inversion deadlock.
*/
- backward_mask = original_mask(target_entry1->class->usage_mask);
+ backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
ret = find_usage_backwards(&this, backward_mask, &target_entry);
if (bfs_error(ret)) {
@@ -2718,7 +2831,7 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
* <target> or not. If it can, <src> -> <target> dependency is already
* in the graph.
*
- * Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
+ * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if
* any error appears in the bfs search.
*/
static noinline enum bfs_result
@@ -4577,7 +4690,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
u8 curr_inner;
int depth;
- if (!curr->lockdep_depth || !next_inner || next->trylock)
+ if (!next_inner || next->trylock)
return 0;
if (!next_outer)
@@ -5736,7 +5849,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- trace_lock_acquired(lock, ip);
+ trace_lock_contended(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
@@ -5754,7 +5867,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- trace_lock_contended(lock, ip);
+ trace_lock_acquired(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index a7276aaf2abc..db9301591e3f 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
task->blocked_on = waiter;
}
-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;
- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f45a4ec..53e631e1d76d 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112ce155..013e1b08a1bf 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}
+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ acquired:
__ww_mutex_check_waiters(lock, ww_ctx);
}
- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);
debug_mutex_free_waiter(&waiter);
@@ -1080,7 +1088,7 @@ skip_wait:
err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d3fa71..f0c710b1d192 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)
diff --git a/kernel/module.c b/kernel/module.c
index b5dd92e35b02..927d46cb8eb9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void)
#endif
}
+#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
+void set_module_sig_enforced(void)
+{
+ sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
+
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
* on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void)
}
EXPORT_SYMBOL(is_module_sig_enforced);
-void set_module_sig_enforced(void)
-{
- sig_enforce = true;
-}
-
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
@@ -2401,6 +2405,15 @@ static long get_offset(struct module *mod, unsigned int *size,
return ret;
}
+static bool module_init_layout_section(const char *sname)
+{
+#ifndef CONFIG_MODULE_UNLOAD
+ if (module_exit_section(sname))
+ return true;
+#endif
+ return module_init_section(sname);
+}
+
/*
* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
* might -- code, read-only data, read-write data, small data. Tally
@@ -2435,7 +2448,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || module_init_section(sname))
+ || module_init_layout_section(sname))
continue;
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
pr_debug("\t%s\n", sname);
@@ -2468,7 +2481,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || !module_init_section(sname))
+ || !module_init_layout_section(sname))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
| INIT_OFFSET_MASK);
@@ -2807,11 +2820,7 @@ void * __weak module_alloc(unsigned long size)
bool __weak module_init_section(const char *name)
{
-#ifndef CONFIG_MODULE_UNLOAD
- return strstarts(name, ".init") || module_exit_section(name);
-#else
return strstarts(name, ".init");
-#endif
}
bool __weak module_exit_section(const char *name)
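A hedged illustration (the helper below is hypothetical and not in the patch) of what the split buys: with CONFIG_MODULE_UNLOAD disabled, ".exit.text" is still placed in the init layout, but it is no longer reported as an init section to other users such as jump labels.

/* Hypothetical check, assuming the predicates shown above. */
static void example_exit_section_classification(void)
{
#ifndef CONFIG_MODULE_UNLOAD
	/* exit code is laid out in the init region... */
	WARN_ON(!module_init_layout_section(".exit.text"));
#endif
	/* ...but is never classified as an init section itself */
	WARN_ON(module_init_section(".exit.text"));
}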
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 7a1414622051..94232186fccb 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -391,6 +391,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
/* No obstacles. */
return vprintk_default(fmt, args);
}
+EXPORT_SYMBOL(vprintk);
void __init printk_safe_init(void)
{
@@ -411,4 +412,3 @@ void __init printk_safe_init(void)
/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
-EXPORT_SYMBOL(vprintk);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 76f09456ec4b..2997ca600d18 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -170,6 +170,21 @@ void __ptrace_unlink(struct task_struct *child)
spin_unlock(&child->sighand->siglock);
}
+static bool looks_like_a_spurious_pid(struct task_struct *task)
+{
+ if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
+ return false;
+
+ if (task_pid_vnr(task) == task->ptrace_message)
+ return false;
+ /*
+ * The tracee changed its pid but the PTRACE_EVENT_EXEC event
+ * was not wait()'ed for; most probably the debugger targets the old
+ * leader, which was destroyed in de_thread().
+ */
+ return true;
+}
+
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
@@ -180,7 +195,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
return ret;
spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+ if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+ !__fatal_signal_pending(task)) {
task->state = __TASK_TRACED;
ret = true;
}
diff --git a/kernel/reboot.c b/kernel/reboot.c
index a6ad5eb2fa73..f7440c0c7e43 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "reboot: " fmt
+#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kexec.h>
@@ -518,6 +519,84 @@ void orderly_reboot(void)
}
EXPORT_SYMBOL_GPL(orderly_reboot);
+/**
+ * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
+ * @work: work_struct associated with the emergency poweroff function
+ *
+ * This function is called in very critical situations to force
+ * a kernel poweroff after a configurable timeout value.
+ */
+static void hw_failure_emergency_poweroff_func(struct work_struct *work)
+{
+ /*
+ * We have reached here after the emergency shutdown waiting period has
+ * expired. This means orderly_poweroff has not been able to shut off
+ * the system for some reason.
+ *
+ * Try to shut down the system immediately using kernel_power_off(),
+ * if a poweroff handler is populated.
+ */
+ pr_emerg("Hardware protection timed-out. Trying forced poweroff\n");
+ kernel_power_off();
+
+ /*
+ * Worst of the worst case trigger emergency restart
+ */
+ pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
+ emergency_restart();
+}
+
+static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
+ hw_failure_emergency_poweroff_func);
+
+/**
+ * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
+ *
+ * This may be called from any critical situation to trigger a system shutdown
+ * after a given period of time. If the delay is not positive, it is not scheduled.
+ */
+static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
+{
+ if (poweroff_delay_ms <= 0)
+ return;
+ schedule_delayed_work(&hw_failure_emergency_poweroff_work,
+ msecs_to_jiffies(poweroff_delay_ms));
+}
+
+/**
+ * hw_protection_shutdown - Trigger an emergency system poweroff
+ *
+ * @reason: Reason of emergency shutdown to be printed.
+ * @ms_until_forced: Time to wait for an orderly shutdown before triggering
+ * a forced shutdown. A negative value disables the forced
+ * shutdown.
+ *
+ * Initiate an emergency system shutdown in order to protect hardware from
+ * further damage. Usage examples include thermal protection or voltage or
+ * current regulator failures.
+ * NOTE: The request is ignored if protection shutdown is already pending even
+ * if the previous request has given a large timeout for forced shutdown.
+ * Can be called from any context.
+ */
+void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ static atomic_t allow_proceed = ATOMIC_INIT(1);
+
+ pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
+
+ /* Shutdown should be initiated only once. */
+ if (!atomic_dec_and_test(&allow_proceed))
+ return;
+
+ /*
+ * Queue a backup emergency shutdown in the event of
+ * orderly_poweroff failure
+ */
+ hw_failure_emergency_poweroff(ms_until_forced);
+ orderly_poweroff(true);
+}
+EXPORT_SYMBOL_GPL(hw_protection_shutdown);
+
static int __init reboot_setup(char *str)
{
for (;;) {
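A hedged usage sketch of the new API (the reason string and timeout below are made-up values; a real caller would be a thermal or regulator driver):

/* Hypothetical caller: request an orderly shutdown and fall back to a
 * forced poweroff after 3 seconds. */
static void example_critical_fault(void)
{
	hw_protection_shutdown("Temperature above critical threshold", 3000);
}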
diff --git a/kernel/resource.c b/kernel/resource.c
index 028a5ab18818..ca9f5198a01f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1805,7 +1805,7 @@ static struct resource *__request_free_mem_region(struct device *dev,
REGION_DISJOINT)
continue;
- if (!__request_region_locked(res, &iomem_resource, addr, size,
+ if (__request_region_locked(res, &iomem_resource, addr, size,
name, 0))
break;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5226cc26a095..4ca80df205ce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6389,7 +6389,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, false, true);
}
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 9c882f20803e..c5aacbd492a1 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
+#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
P(se.avg.util_est.ewma);
- P(se.avg.util_est.enqueued);
+ PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 20aa234ffe04..23663318fb81 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3298,6 +3298,52 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
#ifdef CONFIG_SMP
#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
+ * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
+ * bottom-up, we only have to test whether the cfs_rq before us on the list
+ * is our child.
+ * If cfs_rq is not on the list, test whether a child needs it to be added to
+ * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
+ */
+static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+{
+ struct cfs_rq *prev_cfs_rq;
+ struct list_head *prev;
+
+ if (cfs_rq->on_list) {
+ prev = cfs_rq->leaf_cfs_rq_list.prev;
+ } else {
+ struct rq *rq = rq_of(cfs_rq);
+
+ prev = rq->tmp_alone_branch;
+ }
+
+ prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+
+ return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+}
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+ if (cfs_rq->load.weight)
+ return false;
+
+ if (cfs_rq->avg.load_sum)
+ return false;
+
+ if (cfs_rq->avg.util_sum)
+ return false;
+
+ if (cfs_rq->avg.runnable_sum)
+ return false;
+
+ if (child_cfs_rq_on_list(cfs_rq))
+ return false;
+
+ return true;
+}
+
/**
* update_tg_load_avg - update the tg's load avg
* @cfs_rq: the cfs_rq whose avg changed
@@ -3499,10 +3545,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
- s64 delta_sum;
u32 divider;
if (!runnable_sum)
@@ -3549,13 +3594,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
load_sum = (s64)se_weight(se) * runnable_sum;
load_avg = div_s64(load_sum, divider);
- delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
- delta_avg = load_avg - se->avg.load_avg;
+ delta = load_avg - se->avg.load_avg;
se->avg.load_sum = runnable_sum;
se->avg.load_avg = load_avg;
- add_positive(&cfs_rq->avg.load_avg, delta_avg);
- add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+ add_positive(&cfs_rq->avg.load_avg, delta);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3811,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ /*
+ * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+ * See ___update_load_avg() for details.
+ */
+ u32 divider = get_pelt_divider(&cfs_rq->avg);
+
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
- sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
- sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+ cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
@@ -3902,7 +3953,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
- return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+ return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
}
static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4053,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
* Reset EWMA on utilization increases, the moving average is used only
* to smooth utilization decreases.
*/
- ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+ ue.enqueued = task_util(p);
if (sched_feat(UTIL_EST_FASTUP)) {
if (ue.ewma < ue.enqueued) {
ue.ewma = ue.enqueued;
@@ -4051,6 +4102,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
ue.ewma += last_ewma_diff;
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
+ ue.enqueued |= UTIL_AVG_UNCHANGED;
WRITE_ONCE(p->se.avg.util_est, ue);
trace_sched_util_est_se_tp(&p->se);
@@ -4085,6 +4137,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
#else /* CONFIG_SMP */
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+ return true;
+}
+
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
@@ -4743,8 +4800,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
cfs_rq->throttled_clock_task;
- /* Add cfs_rq with already running entity in the list */
- if (cfs_rq->nr_running >= 1)
+ /* Add cfs_rq with load or one or more already running entities to the list */
+ if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
list_add_leaf_cfs_rq(cfs_rq);
}
@@ -6217,7 +6274,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
}
if (has_idle_core)
- set_idle_cores(this, false);
+ set_idle_cores(target, false);
if (sched_feat(SIS_PROP) && !has_idle_core) {
time = cpu_clock(this) - time;
@@ -7990,23 +8047,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
#ifdef CONFIG_FAIR_GROUP_SCHED
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
- if (cfs_rq->load.weight)
- return false;
-
- if (cfs_rq->avg.load_sum)
- return false;
-
- if (cfs_rq->avg.util_sum)
- return false;
-
- if (cfs_rq->avg.runnable_sum)
- return false;
-
- return true;
-}
-
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq, *pos;
@@ -8030,7 +8070,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(cfs_rq_of(se), se, 0);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 1462846d244e..cfe94ffd2b38 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
static inline void cfs_se_util_change(struct sched_avg *avg)
{
unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
if (!sched_feat(UTIL_EST))
return;
- /* Avoid store if the flag has been already set */
+ /* Avoid store if the flag has been already reset */
enqueued = avg->util_est.enqueued;
if (!(enqueued & UTIL_AVG_UNCHANGED))
return;
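With UTIL_AVG_UNCHANGED removed from pelt.h by this series and kept as an internal flag in the LSB of util_est.enqueued, readers have to mask it off, as the fair.c and debug.c hunks above do. A minimal illustration (the helper name is made up, not part of the patch):

/* Illustrative only: UTIL_AVG_UNCHANGED occupies bit 0 of the stored value. */
static inline unsigned int util_est_enqueued_value(unsigned int enqueued)
{
	return enqueued & ~UTIL_AVG_UNCHANGED;
}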
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 6ecd3f3a52b5..9f58049ac16d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1105,28 +1105,30 @@ static int seccomp_do_user_notification(int this_syscall,
up(&match->notif->request);
wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
- mutex_unlock(&match->notify_lock);
/*
* This is where we wait for a reply from userspace.
*/
-wait:
- err = wait_for_completion_interruptible(&n.ready);
- mutex_lock(&match->notify_lock);
- if (err == 0) {
- /* Check if we were woken up by a addfd message */
+ do {
+ mutex_unlock(&match->notify_lock);
+ err = wait_for_completion_interruptible(&n.ready);
+ mutex_lock(&match->notify_lock);
+ if (err != 0)
+ goto interrupted;
+
addfd = list_first_entry_or_null(&n.addfd,
struct seccomp_kaddfd, list);
- if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) {
+ /* Check if we were woken up by an addfd message */
+ if (addfd)
seccomp_handle_addfd(addfd);
- mutex_unlock(&match->notify_lock);
- goto wait;
- }
- ret = n.val;
- err = n.error;
- flags = n.flags;
- }
+ } while (n.state != SECCOMP_NOTIFY_REPLIED);
+
+ ret = n.val;
+ err = n.error;
+ flags = n.flags;
+
+interrupted:
/* If there were any pending addfd calls, clear them out */
list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
/* The process went away before we got a chance to handle it */
diff --git a/kernel/signal.c b/kernel/signal.c
index 66e88649cf74..30a0bee5ff9b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -431,16 +431,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
rcu_read_unlock();
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
- /*
- * Preallocation does not hold sighand::siglock so it can't
- * use the cache. The lockless caching requires that only
- * one consumer and only one producer run at a time.
- */
- q = READ_ONCE(t->sigqueue_cache);
- if (!q || sigqueue_flags)
- q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
- else
- WRITE_ONCE(t->sigqueue_cache, NULL);
+ q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
} else {
print_dropped_signal(sig);
}
@@ -457,44 +448,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
return q;
}
-void exit_task_sigqueue_cache(struct task_struct *tsk)
-{
- /* Race free because @tsk is mopped up */
- struct sigqueue *q = tsk->sigqueue_cache;
-
- if (q) {
- tsk->sigqueue_cache = NULL;
- /*
- * Hand it back to the cache as the task might
- * be self reaping which would leak the object.
- */
- kmem_cache_free(sigqueue_cachep, q);
- }
-}
-
-static void sigqueue_cache_or_free(struct sigqueue *q)
-{
- /*
- * Cache one sigqueue per task. This pairs with the consumer side
- * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
- * compiler from store tearing and to tell KCSAN that the data race
- * is intentional when run without holding current->sighand->siglock,
- * which is fine as current obviously cannot run __sigqueue_free()
- * concurrently.
- */
- if (!READ_ONCE(current->sigqueue_cache))
- WRITE_ONCE(current->sigqueue_cache, q);
- else
- kmem_cache_free(sigqueue_cachep, q);
-}
-
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
if (atomic_dec_and_test(&q->user->sigpending))
free_uid(q->user);
- sigqueue_cache_or_free(q);
+ kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
@@ -1236,6 +1196,7 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
case SIL_TIMER:
case SIL_POLL:
case SIL_FAULT:
+ case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
@@ -1804,6 +1765,21 @@ int force_sig_pkuerr(void __user *addr, u32 pkey)
}
#endif
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
+{
+ struct kernel_siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_PERF;
+ info.si_addr = addr;
+ info.si_perf_data = sig_data;
+ info.si_perf_type = type;
+
+ return force_sig_info(&info);
+}
+
/* For the crazy architectures that include trap information in
* the errno field, instead of an actual errno value.
*/
@@ -2564,6 +2540,7 @@ static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
case SIL_FAULT:
+ case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
@@ -3251,6 +3228,10 @@ enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
#endif
else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
layout = SIL_PERF_EVENT;
+#ifdef __ARCH_SI_TRAPNO
+ else if (layout == SIL_FAULT)
+ layout = SIL_FAULT_TRAPNO;
+#endif
}
else if (si_code <= NSIGPOLL)
layout = SIL_POLL;
@@ -3354,35 +3335,28 @@ void copy_siginfo_to_external32(struct compat_siginfo *to,
break;
case SIL_FAULT:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+ break;
+ case SIL_FAULT_TRAPNO:
+ to->si_addr = ptr_to_compat(from->si_addr);
to->si_trapno = from->si_trapno;
-#endif
break;
case SIL_FAULT_MCEERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_lower = ptr_to_compat(from->si_lower);
to->si_upper = ptr_to_compat(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_pkey = from->si_pkey;
break;
case SIL_PERF_EVENT:
to->si_addr = ptr_to_compat(from->si_addr);
- to->si_perf = from->si_perf;
+ to->si_perf_data = from->si_perf_data;
+ to->si_perf_type = from->si_perf_type;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
@@ -3438,35 +3412,28 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
break;
case SIL_FAULT:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+ break;
+ case SIL_FAULT_TRAPNO:
+ to->si_addr = compat_ptr(from->si_addr);
to->si_trapno = from->si_trapno;
-#endif
break;
case SIL_FAULT_MCEERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_lower = compat_ptr(from->si_lower);
to->si_upper = compat_ptr(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_pkey = from->si_pkey;
break;
case SIL_PERF_EVENT:
to->si_addr = compat_ptr(from->si_addr);
- to->si_perf = from->si_perf;
+ to->si_perf_data = from->si_perf_data;
+ to->si_perf_type = from->si_perf_type;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
@@ -4644,11 +4611,13 @@ static inline void siginfo_buildtime_checks(void)
/* sigfault */
CHECK_OFFSET(si_addr);
+ CHECK_OFFSET(si_trapno);
CHECK_OFFSET(si_addr_lsb);
CHECK_OFFSET(si_lower);
CHECK_OFFSET(si_upper);
CHECK_OFFSET(si_pkey);
- CHECK_OFFSET(si_perf);
+ CHECK_OFFSET(si_perf_data);
+ CHECK_OFFSET(si_perf_type);
/* sigpoll */
CHECK_OFFSET(si_band);
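A hedged sketch of delivering the new perf SIGTRAP via the helper added above (the address, type and data values are placeholders; a real caller would pass the event's user address, its perf type, and its attr.sig_data):

/* Hypothetical caller of force_sig_perf(). */
static int example_report_perf_trap(void __user *addr, u64 sig_data)
{
	return force_sig_perf(addr, PERF_TYPE_BREAKPOINT, sig_data);
}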
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 14edf84cc571..d4a78e08f6d8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -225,7 +225,27 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
mutex_unlock(&bpf_stats_enabled_mutex);
return ret;
}
-#endif
+
+static int bpf_unpriv_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, unpriv_enable = *(int *)table->data;
+ bool locked_state = unpriv_enable == 1;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &unpriv_enable;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ if (locked_state && unpriv_enable != 1)
+ return -EPERM;
+ *(int *)table->data = unpriv_enable;
+ }
+ return ret;
+}
+#endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
/*
* /proc/sys support
@@ -2600,10 +2620,9 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_unprivileged_bpf_disabled,
.maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
.mode = 0644,
- /* only handle a transition from default "0" to "1" */
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ONE,
- .extra2 = SYSCTL_ONE,
+ .proc_handler = bpf_unpriv_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &two,
},
{
.procname = "bpf_stats_enabled",
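The new handler accepts values 0..2 but makes state 1 sticky. A small sketch (a hypothetical predicate, not in the patch) of the writes it permits:

/* Mirrors bpf_unpriv_handler's policy for illustration only. */
static bool bpf_unpriv_write_allowed(int cur, int new)
{
	if (new < 0 || new > 2)
		return false;	/* clamped to 0..2 by proc_dointvec_minmax() */
	if (cur == 1 && new != 1)
		return false;	/* 1 ("permanently disabled") is one-way */
	return true;
}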
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index bea9d08b1698..5897828b9d7e 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
if (rtcdev)
return -EBUSY;
- if (!rtc->ops->set_alarm)
+ if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
return -1;
if (!device_may_wakeup(rtc->dev.parent))
return -1;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 828b091501ca..6784f27a3099 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d2d7cf6cfe83..7a52bc172841 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
- int ret = security_locked_down(LOCKDOWN_BPF_READ);
+ int ret;
- if (unlikely(ret < 0))
- goto fail;
ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
- goto fail;
- return ret;
-fail:
- memset(dst, 0, size);
+ memset(dst, 0, size);
return ret;
}
@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
- int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
- if (unlikely(ret < 0))
- goto fail;
+ int ret;
/*
* The strncpy_from_kernel_nofault() call will likely not fill the
@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
*/
ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
- goto fail;
-
- return ret;
-fail:
- memset(dst, 0, size);
+ memset(dst, 0, size);
return ret;
}
@@ -1011,16 +999,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
- return &bpf_probe_read_kernel_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
- return &bpf_probe_read_kernel_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
case BPF_FUNC_probe_read:
- return &bpf_probe_read_compat_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_compat_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_compat_str_proto;
+ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+ NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2e8a3fde7104..72ef4dccbcc4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
+ char ins[MCOUNT_INSN_SIZE];
int i;
+ if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+ printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+ return;
+ }
+
printk(KERN_CONT "%s", fmt);
for (i = 0; i < MCOUNT_INSN_SIZE; i++)
- printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+ printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
}
enum ftrace_bug_type ftrace_bug_type;
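The same pattern in isolation, as a hedged self-contained sketch rather than the ftrace code: copy the bytes with a fault-tolerant primitive first, and only print what was actually read.

/* Illustrative helper, assuming kernel printk and nofault copy APIs. */
static void dump_bytes_safely(const unsigned char *p, size_t len)
{
	unsigned char buf[16];
	size_t i;

	if (len > sizeof(buf))
		len = sizeof(buf);
	if (copy_from_kernel_nofault(buf, p, len)) {
		pr_cont(" [FAULT] %px\n", p);
		return;
	}
	for (i = 0; i < len; i++)
		pr_cont("%s%02x", i ? ":" : "", buf[i]);
}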
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 560e4c8d3825..d23a09d3eb37 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
};
static struct saved_cmdlines_buffer *savedcmd;
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
static inline char *get_saved_cmdlines(int idx)
{
return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
{
if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
return true;
- if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
- return true;
if (!__this_cpu_read(trace_taskinfo_save))
return true;
return false;
@@ -2736,7 +2731,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+ if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
return entry;
@@ -3704,6 +3699,9 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
goto print;
while (*p) {
+ bool star = false;
+ int len = 0;
+
j = 0;
/* We only care about %s and variants */
@@ -3725,13 +3723,17 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
/* Need to test cases like %08.*s */
for (j = 1; p[i+j]; j++) {
if (isdigit(p[i+j]) ||
- p[i+j] == '*' ||
p[i+j] == '.')
continue;
+ if (p[i+j] == '*') {
+ star = true;
+ continue;
+ }
break;
}
if (p[i+j] == 's')
break;
+ star = false;
}
j = 0;
}
@@ -3744,6 +3746,9 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
iter->fmt[i] = '\0';
trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+ if (star)
+ len = va_arg(ap, int);
+
/* The ap now points to the string data of the %s */
str = va_arg(ap, const char *);
@@ -3762,8 +3767,18 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
int ret;
/* Try to safely read the string */
- ret = strncpy_from_kernel_nofault(iter->fmt, str,
- iter->fmt_size);
+ if (star) {
+ if (len + 1 > iter->fmt_size)
+ len = iter->fmt_size - 1;
+ if (len < 0)
+ len = 0;
+ ret = copy_from_kernel_nofault(iter->fmt, str, len);
+ iter->fmt[len] = 0;
+ star = false;
+ } else {
+ ret = strncpy_from_kernel_nofault(iter->fmt, str,
+ iter->fmt_size);
+ }
if (ret < 0)
trace_seq_printf(&iter->seq, "(0x%px)", str);
else
@@ -3775,7 +3790,10 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
strncpy(iter->fmt, p + i, j + 1);
iter->fmt[j+1] = '\0';
}
- trace_seq_printf(&iter->seq, iter->fmt, str);
+ if (star)
+ trace_seq_printf(&iter->seq, iter->fmt, len, str);
+ else
+ trace_seq_printf(&iter->seq, iter->fmt, str);
p += i + j + 1;
}
@@ -3975,9 +3993,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-EBUSY);
#endif
- if (!iter->snapshot)
- atomic_inc(&trace_record_taskinfo_disabled);
-
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
@@ -4020,9 +4035,6 @@ static void s_stop(struct seq_file *m, void *p)
return;
#endif
- if (!iter->snapshot)
- atomic_dec(&trace_record_taskinfo_disabled);
-
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
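The '*' handling above mirrors ordinary printf semantics, where the precision for "%.*s" arrives as an extra int argument just before the string pointer; a minimal illustration (not from the patch):

static void example_star_precision(void)
{
	const char *s = "abcdef";

	pr_info("%.*s\n", 4, s);	/* prints "abcd" */
}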
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index c1637f90c8a3..4702efb00ff2 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
prev_time = READ_ONCE(trace_clock_struct.prev_time);
now = sched_clock_cpu(this_cpu);
- /* Make sure that now is always greater than prev_time */
+ /* Make sure that now is always greater than or equal to prev_time */
if ((s64)(now - prev_time) < 0)
- now = prev_time + 1;
+ now = prev_time;
/*
* If in an NMI context then dont risk lockups and simply return
@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
/* Reread prev_time in case it was already updated */
prev_time = READ_ONCE(trace_clock_struct.prev_time);
if ((s64)(now - prev_time) < 0)
- now = prev_time + 1;
+ now = prev_time;
trace_clock_struct.prev_time = now;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7c397907d0e9..92d3bcc5a5e0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -302,10 +302,10 @@ void touch_softlockup_watchdog_sync(void)
__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
-static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
+static int is_softlockup(unsigned long touch_ts,
+ unsigned long period_ts,
+ unsigned long now)
{
- unsigned long now = get_timestamp();
-
if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
/* Warn about unreasonable delays. */
if (time_after(now, period_ts + get_softlockup_thresh()))
@@ -353,8 +353,7 @@ static int softlockup_fn(void *data)
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
- unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
- unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
+ unsigned long touch_ts, period_ts, now;
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
@@ -377,11 +376,22 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
/*
+ * Read the current timestamp first. It might become invalid anytime
+ * when a virtual machine is stopped by the host or when the watchdog
+ * is touched from NMI.
+ */
+ now = get_timestamp();
+ /*
* If a virtual machine is stopped by the host it can look to
- * the watchdog like a soft lockup. Check to see if the host
- * stopped the vm before we process the timestamps.
+ * the watchdog like a soft lockup. This function touches the watchdog.
*/
kvm_check_and_clear_guest_paused();
+ /*
+ * The stored timestamp is comparable with @now only when not touched.
+ * It might get touched anytime from NMI. Make sure that is_softlockup()
+ * uses the same (valid) value.
+ */
+ period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
/* Reset the interval when touched by known problematic code. */
if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
@@ -398,13 +408,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
- /* check for a softlockup
- * This is done by making sure a high priority task is
- * being scheduled. The task touches the watchdog to
- * indicate it is getting cpu time. If it hasn't then
- * this is a good indication some task is hogging the cpu
- */
- duration = is_softlockup(touch_ts, period_ts);
+ /* Check for a softlockup. */
+ touch_ts = __this_cpu_read(watchdog_touch_ts);
+ duration = is_softlockup(touch_ts, period_ts, now);
if (unlikely(duration)) {
/*
* Prevent multiple soft-lockup reports if one cpu is already
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b19d759e55a5..50142fc08902 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -50,6 +50,7 @@
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
+#include <linux/kvm_para.h>
#include "workqueue_internal.h"
@@ -5772,6 +5773,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
{
unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
bool lockup_detected = false;
+ unsigned long now = jiffies;
struct worker_pool *pool;
int pi;
@@ -5786,6 +5788,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
if (list_empty(&pool->worklist))
continue;
+ /*
+ * If a virtual machine is stopped by the host it can look to
+ * the watchdog like a stall.
+ */
+ kvm_check_and_clear_guest_paused();
+
/* get the latest of pool and touched timestamps */
if (pool->cpu >= 0)
touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
@@ -5799,12 +5807,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
ts = touched;
/* did we stall? */
- if (time_after(jiffies, ts + thresh)) {
+ if (time_after(now, ts + thresh)) {
lockup_detected = true;
pr_emerg("BUG: workqueue lockup - pool");
pr_cont_pool_info(pool);
pr_cont(" stuck for %us!\n",
- jiffies_to_msecs(jiffies - pool_ts) / 1000);
+ jiffies_to_msecs(now - pool_ts) / 1000);
}
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 678c13967580..1e1bd6f4a13d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1372,7 +1372,6 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
diff --git a/lib/Makefile b/lib/Makefile
index e11cfc18b6c0..2cc359ec1fdd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -348,6 +348,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
+CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e98c85a99787..3df653994177 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
-s64 atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
-void atomic64_set(atomic64_t *v, s64 i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(s64 a, atomic64_t *v) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return val;
}
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
diff --git a/lib/crc64.c b/lib/crc64.c
index 47cfa054827f..9f852a89ee2a 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
/**
* crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
* @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
- or the previous crc64 value if computing incrementally.
+ * or the previous crc64 value if computing incrementally.
* @p: pointer to buffer over which CRC64 is run
* @len: length of buffer @p
*/
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 06d3135bd184..a75ee30b77cb 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
* Generic 'turn off all lock debugging' function:
*/
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 921d0a654243..641767b0dce2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -586,13 +586,11 @@ static int remaining(int wrote)
return 0;
}
-static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
{
int pos_after_tid;
int pos = 0;
- *buf = '\0';
-
if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
if (in_interrupt())
pos += snprintf(buf + pos, remaining(pos), "<intr> ");
@@ -618,11 +616,18 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
+static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf)
+{
+ if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY))
+ return __dynamic_emit_prefix(desc, buf);
+ return buf;
+}
+
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
struct va_format vaf;
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -655,7 +660,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor,
if (!dev) {
printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
dynamic_emit_prefix(descriptor, buf),
@@ -684,7 +689,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (dev && dev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
"%s%s %s %s%s: %pV",
@@ -720,7 +725,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (ibdev && ibdev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
"%s%s %s %s: %pV",
@@ -915,7 +920,6 @@ static const struct seq_operations ddebug_proc_seqops = {
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
- vpr_info("called\n");
return seq_open_private(file, &ddebug_proc_seqops,
sizeof(struct ddebug_iter));
}
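The wrapper added above only falls back to __dynamic_emit_prefix() when one of the _DPRINTK_FLAGS_INCL_* bits is set, and the callers now zero-initialize their buffers, so the common case costs a single flag test. A minimal sketch of the resulting caller pattern (names and PREFIX_SIZE as in this file):

	char buf[PREFIX_SIZE] = "";	/* stays empty unless a prefix flag is set */

	printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);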
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 2d85abac1744..161108e5d2fe 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -53,6 +53,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
#define LOCKTYPE_LL 0x40
+#define LOCKTYPE_SPECIAL 0x80
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -194,6 +195,7 @@ static void init_shared_classes(void)
#define HARDIRQ_ENTER() \
local_irq_disable(); \
__irq_enter(); \
+ lockdep_hardirq_threaded(); \
WARN_ON(!in_irq());
#define HARDIRQ_EXIT() \
@@ -2492,16 +2494,6 @@ static void rcu_sched_exit(int *_)
int rcu_sched_guard_##name __guard(rcu_sched_exit); \
rcu_read_lock_sched();
-static void rcu_callback_exit(int *_)
-{
- rcu_lock_release(&rcu_callback_map);
-}
-
-#define RCU_CALLBACK_CONTEXT(name, ...) \
- int rcu_callback_guard_##name __guard(rcu_callback_exit); \
- rcu_lock_acquire(&rcu_callback_map);
-
-
static void raw_spinlock_exit(raw_spinlock_t **lock)
{
raw_spin_unlock(*lock);
@@ -2558,8 +2550,6 @@ static void __maybe_unused inner##_in_##outer(void) \
* ---------------+-------+----------+------+-------
* RCU_BH | o | o | o | x
* ---------------+-------+----------+------+-------
- * RCU_CALLBACK | o | o | o | x
- * ---------------+-------+----------+------+-------
* RCU_SCHED | o | o | x | x
* ---------------+-------+----------+------+-------
* RAW_SPIN | o | o | x | x
@@ -2576,7 +2566,6 @@ GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
@@ -2638,10 +2627,6 @@ static void wait_context_tests(void)
DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
pr_cont("\n");
- print_testname("in RCU callback context");
- DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
- pr_cont("\n");
-
print_testname("in RCU-sched context");
DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
pr_cont("\n");
@@ -2744,6 +2729,66 @@ static void local_lock_tests(void)
pr_cont("\n");
}
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+ /* mutex_A is hardirq-unsafe and softirq-unsafe */
+ /* mutex_A -> lock_C */
+ mutex_lock(&mutex_A);
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+ mutex_unlock(&mutex_A);
+
+ /* lock_A is hardirq-safe */
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT();
+
+ /* lock_A -> lock_B */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B);
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ /* lock_B -> lock_C */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_B);
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ spin_unlock(&lock_B);
+ HARDIRQ_ENABLE();
+
+ /* lock_D is softirq-safe */
+ SOFTIRQ_ENTER();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_EXIT();
+
+ /* And lock_D is hardirq-unsafe */
+ SOFTIRQ_DISABLE();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_ENABLE();
+
+ /*
+ * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+ * deadlock.
+ *
+ * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+ * hardirq-unsafe, deadlock.
+ */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+}
+
void locking_selftest(void)
{
/*
@@ -2872,6 +2917,10 @@ void locking_selftest(void)
local_lock_tests();
+ print_testname("hardirq_unsafe_softirq_safe");
+ dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+ pr_cont("\n");
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index a1071cdefb5a..af9302141bcf 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -275,7 +275,7 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
percpu_ref_switch_lock);
- if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ if (data->force_atomic || percpu_ref_is_dying(ref))
__percpu_ref_switch_to_atomic(ref, confirm_switch);
else
__percpu_ref_switch_to_percpu(ref);
@@ -385,7 +385,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ WARN_ONCE(percpu_ref_is_dying(ref),
"%s called more than once on %ps!", __func__,
ref->data->release);
@@ -465,7 +465,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref)
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(!percpu_ref_is_dying(ref));
WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
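The open-coded __PERCPU_REF_DEAD tests above are replaced with a helper that is not part of this hunk; assuming it simply wraps the same bit test (as an inline in include/linux/percpu-refcount.h), it looks like:

	static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
	{
		/* set by percpu_ref_kill(), cleared again by percpu_ref_resurrect() */
		return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
	}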
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index dc05cfc2d12f..cacbbbdef768 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -654,8 +654,20 @@ static char global_array[10];
static void kasan_global_oob(struct kunit *test)
{
- volatile int i = 3;
- char *p = &global_array[ARRAY_SIZE(global_array) + i];
+ /*
+ * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
+	 * from failing here and panicking the kernel, access the array via a
+ * volatile pointer, which will prevent the compiler from being able to
+ * determine the array bounds.
+ *
+ * This access uses a volatile pointer to char (char *volatile) rather
+ * than the more conventional pointer to volatile char (volatile char *)
+ * because we want to prevent the compiler from making inferences about
+ * the pointer itself (i.e. its array bounds), not the data that it
+ * refers to.
+ */
+ char *volatile array = global_array;
+ char *p = &array[ARRAY_SIZE(global_array) + 3];
/* Only generic mode instruments globals. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -703,8 +715,9 @@ static void ksize_uaf(struct kunit *test)
static void kasan_stack_oob(struct kunit *test)
{
char stack_array[10];
- volatile int i = OOB_TAG_OFF;
- char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
+ /* See comment in kasan_global_oob. */
+ char *volatile array = stack_array;
+ char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
@@ -715,7 +728,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
- char *p = alloca_array - 1;
+ /* See comment in kasan_global_oob. */
+ char *volatile array = alloca_array;
+ char *p = array - 1;
/* Only generic mode instruments dynamic allocas. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -728,7 +743,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
{
volatile int i = 10;
char alloca_array[i];
- char *p = alloca_array + i;
+ /* See comment in kasan_global_oob. */
+ char *volatile array = alloca_array;
+ char *p = array + i;
/* Only generic mode instruments dynamic allocas. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
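The distinction the comment above draws between the two volatile placements can be summarized by two illustrative declarations:

	volatile char *p1;	/* the bytes p1 points at are volatile; accesses are not optimized away */
	char *volatile p2;	/* the pointer value itself is volatile, so the compiler cannot
				 * infer which object (or array bounds) it refers to */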
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 05efe98a9ac2..297d1b349c19 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
pr_debug("Validating PMD advanced\n");
/* Align the address wrt HPAGE_PMD_SIZE */
- vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+ vaddr &= HPAGE_PMD_MASK;
pgtable_trans_huge_deposit(mm, pmdp, pgtable);
@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
pr_debug("Validating PUD advanced\n");
/* Align the address wrt HPAGE_PUD_SIZE */
- vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+ vaddr &= HPAGE_PUD_MASK;
set_pud_at(mm, vaddr, pudp, pud);
pudp_set_wrprotect(mm, vaddr, pudp);
diff --git a/mm/gup.c b/mm/gup.c
index 0697134b6a12..3ded6a5f26b2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1593,10 +1593,6 @@ struct page *get_dump_page(unsigned long addr)
FOLL_FORCE | FOLL_DUMP | FOLL_GET);
if (locked)
mmap_read_unlock(mm);
-
- if (ret == 1 && is_page_poisoned(page))
- return NULL;
-
return (ret == 1) ? page : NULL;
}
#endif /* CONFIG_ELF_CORE */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 63ed6b25deaa..6d2a0119fc58 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
@@ -98,6 +99,7 @@ retry:
__free_pages(zero_page, compound_order(zero_page));
goto retry;
}
+ WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
+ WRITE_ONCE(huge_zero_pfn, ~0UL);
__free_pages(zero_page, compound_order(zero_page));
return HPAGE_PMD_NR;
}
@@ -2044,7 +2047,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
count_vm_event(THP_SPLIT_PMD);
if (!vma_is_anonymous(vma)) {
- _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+ old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/*
* We are going to unmap this huge page. So
* just go ahead and zap it
@@ -2053,16 +2056,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
zap_deposited_table(mm, pmd);
if (vma_is_special_huge(vma))
return;
- page = pmd_page(_pmd);
- if (!PageDirty(page) && pmd_dirty(_pmd))
- set_page_dirty(page);
- if (!PageReferenced(page) && pmd_young(_pmd))
- SetPageReferenced(page);
- page_remove_rmap(page, true);
- put_page(page);
+ if (unlikely(is_pmd_migration_entry(old_pmd))) {
+ swp_entry_t entry;
+
+ entry = pmd_to_swp_entry(old_pmd);
+ page = migration_entry_to_page(entry);
+ } else {
+ page = pmd_page(old_pmd);
+ if (!PageDirty(page) && pmd_dirty(old_pmd))
+ set_page_dirty(page);
+ if (!PageReferenced(page) && pmd_young(old_pmd))
+ SetPageReferenced(page);
+ page_remove_rmap(page, true);
+ put_page(page);
+ }
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ }
+
+ if (is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2338,17 +2350,17 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page)
{
- enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
- bool unmap_success;
VM_BUG_ON_PAGE(!PageHead(page), page);
if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE;
- unmap_success = try_to_unmap(page, ttu_flags);
- VM_BUG_ON_PAGE(!unmap_success, page);
+ try_to_unmap(page, ttu_flags);
+
+ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}
static void remap_page(struct page *page, unsigned int nr)
@@ -2659,7 +2671,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
struct deferred_split *ds_queue = get_deferred_split_queue(head);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
- int count, mapcount, extra_pins, ret;
+ int extra_pins, ret;
pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
@@ -2718,7 +2730,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
unmap_page(head);
- VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
@@ -2736,9 +2747,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
- count = page_count(head);
- mapcount = total_mapcount(head);
- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+ if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
@@ -2758,16 +2767,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
__split_huge_page(page, list, end);
ret = 0;
} else {
- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
- pr_alert("total_mapcount: %u, page_count(): %u\n",
- mapcount, count);
- if (PageTail(page))
- dump_page(head, NULL);
- dump_page(page, "total_mapcount(head) > 0");
- BUG();
- }
spin_unlock(&ds_queue->split_queue_lock);
-fail: if (mapping)
+fail:
+ if (mapping)
xa_unlock(&mapping->i_pages);
local_irq_enable();
remap_page(head, thp_nr_pages(head));
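The huge_zero_pfn tracking added above makes it possible to test a pmd for the huge zero page without calling pmd_page(), which is unsafe on a non-present pmd. A sketch of the companion helper this change assumes (the real one lives in include/linux/huge_mm.h):

	static inline bool is_huge_zero_pmd(pmd_t pmd)
	{
		return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
	}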
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3db405dea3dc..5ba5a0da6d57 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1588,15 +1588,12 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
return NULL;
}
-pgoff_t __basepage_index(struct page *page)
+pgoff_t hugetlb_basepage_index(struct page *page)
{
struct page *page_head = compound_head(page);
pgoff_t index = page_index(page_head);
unsigned long compound_idx;
- if (!PageHuge(page_head))
- return page_index(page);
-
if (compound_order(page_head) >= MAX_ORDER)
compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
else
@@ -1793,7 +1790,7 @@ retry:
SetPageHWPoison(page);
ClearPageHWPoison(head);
}
- remove_hugetlb_page(h, page, false);
+ remove_hugetlb_page(h, head, false);
h->max_huge_pages--;
spin_unlock_irq(&hugetlb_lock);
update_and_free_page(h, head);
@@ -2121,12 +2118,18 @@ out:
* be restored when a newly allocated huge page must be freed. It is
* to be called after calling vma_needs_reservation to determine if a
* reservation exists.
+ *
+ * vma_del_reservation is used in error paths where an entry in the reserve
+ * map was created during huge page allocation and must be removed. It is to
+ * be called after calling vma_needs_reservation to determine if a reservation
+ * exists.
*/
enum vma_resv_mode {
VMA_NEEDS_RESV,
VMA_COMMIT_RESV,
VMA_END_RESV,
VMA_ADD_RESV,
+ VMA_DEL_RESV,
};
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
@@ -2170,11 +2173,21 @@ static long __vma_reservation_common(struct hstate *h,
ret = region_del(resv, idx, idx + 1);
}
break;
+ case VMA_DEL_RESV:
+ if (vma->vm_flags & VM_MAYSHARE) {
+ region_abort(resv, idx, idx + 1, 1);
+ ret = region_del(resv, idx, idx + 1);
+ } else {
+ ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+ /* region_add calls of range 1 should never fail. */
+ VM_BUG_ON(ret < 0);
+ }
+ break;
default:
BUG();
}
- if (vma->vm_flags & VM_MAYSHARE)
+ if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
return ret;
/*
* We know private mapping must have HPAGE_RESV_OWNER set.
@@ -2222,25 +2235,39 @@ static long vma_add_reservation(struct hstate *h,
return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}
+static long vma_del_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
+}
+
/*
- * This routine is called to restore a reservation on error paths. In the
- * specific error paths, a huge page was allocated (via alloc_huge_page)
- * and is about to be freed. If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set
- * HPageRestoreReserve in the newly allocated page. When the page is freed
- * via free_huge_page, the global reservation count will be incremented if
- * HPageRestoreReserve is set. However, free_huge_page can not adjust the
- * reserve map. Adjust the reserve map here to be consistent with global
- * reserve count adjustments to be made by free_huge_page.
+ * This routine is called to restore reservation information on error paths.
+ * It should ONLY be called for pages allocated via alloc_huge_page(), and
+ * the hugetlb mutex should remain held when calling this routine.
+ *
+ * It handles two specific cases:
+ * 1) A reservation was in place and the page consumed the reservation.
+ * HPageRestoreReserve is set in the page.
+ * 2) No reservation was in place for the page, so HPageRestoreReserve is
+ * not set. However, alloc_huge_page always updates the reserve map.
+ *
+ * In case 1, free_huge_page later in the error path will increment the
+ * global reserve count. But, free_huge_page does not have enough context
+ * to adjust the reservation map. This case deals primarily with private
+ * mappings. Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page. Make sure the
+ * reserve map indicates there is a reservation present.
+ *
+ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
*/
-static void restore_reserve_on_error(struct hstate *h,
- struct vm_area_struct *vma, unsigned long address,
- struct page *page)
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct page *page)
{
- if (unlikely(HPageRestoreReserve(page))) {
- long rc = vma_needs_reservation(h, vma, address);
+ long rc = vma_needs_reservation(h, vma, address);
- if (unlikely(rc < 0)) {
+ if (HPageRestoreReserve(page)) {
+ if (unlikely(rc < 0))
/*
* Rare out of memory condition in reserve map
* manipulation. Clear HPageRestoreReserve so that
@@ -2253,16 +2280,57 @@ static void restore_reserve_on_error(struct hstate *h,
* accounting of reserve counts.
*/
ClearHPageRestoreReserve(page);
- } else if (rc) {
- rc = vma_add_reservation(h, vma, address);
- if (unlikely(rc < 0))
+ else if (rc)
+ (void)vma_add_reservation(h, vma, address);
+ else
+ vma_end_reservation(h, vma, address);
+ } else {
+ if (!rc) {
+ /*
+ * This indicates there is an entry in the reserve map
+ * added by alloc_huge_page. We know it was added
+ * before the alloc_huge_page call, otherwise
+ * HPageRestoreReserve would be set on the page.
+ * Remove the entry so that a subsequent allocation
+ * does not consume a reservation.
+ */
+ rc = vma_del_reservation(h, vma, address);
+ if (rc < 0)
/*
- * See above comment about rare out of
- * memory condition.
+ * VERY rare out of memory condition. Since
+ * we can not delete the entry, set
+ * HPageRestoreReserve so that the reserve
+ * count will be incremented when the page
+ * is freed. This reserve will be consumed
+ * on a subsequent allocation.
*/
- ClearHPageRestoreReserve(page);
+ SetHPageRestoreReserve(page);
+ } else if (rc < 0) {
+ /*
+ * Rare out of memory condition from
+ * vma_needs_reservation call. Memory allocation is
+ * only attempted if a new entry is needed. Therefore,
+ * this implies there is not an entry in the
+ * reserve map.
+ *
+ * For shared mappings, no entry in the map indicates
+ * no reservation. We are done.
+ */
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ /*
+ * For private mappings, no entry indicates
+ * a reservation is present. Since we can
+				 * not add an entry, set HPageRestoreReserve
+ * on the page so reserve count will be
+ * incremented when freed. This reserve will
+ * be consumed on a subsequent allocation.
+ */
+ SetHPageRestoreReserve(page);
} else
- vma_end_reservation(h, vma, address);
+ /*
+ * No reservation present, do nothing
+ */
+ vma_end_reservation(h, vma, address);
}
}
@@ -4037,6 +4105,8 @@ again:
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
if (!pte_same(src_pte_old, entry)) {
+ restore_reserve_on_error(h, vma, addr,
+ new);
put_page(new);
/* dst_entry won't change as in child */
goto again;
@@ -4056,6 +4126,7 @@ again:
* See Documentation/vm/mmu_notifier.rst
*/
huge_ptep_set_wrprotect(src, addr, src_pte);
+ entry = huge_pte_wrprotect(entry);
}
page_dup_rmap(ptepage, true);
@@ -4888,10 +4959,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
if (!page)
goto out;
} else if (!*pagep) {
- ret = -ENOMEM;
+ /* If a page already exists, then it's UFFDIO_COPY for
+ * a non-missing case. Return -EEXIST.
+ */
+ if (vm_shared &&
+ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+ ret = -EEXIST;
+ goto out;
+ }
+
page = alloc_huge_page(dst_vma, dst_addr, 0);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ ret = -ENOMEM;
goto out;
+ }
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
@@ -4995,6 +5076,7 @@ out_release_unlock:
if (vm_shared || is_continue)
unlock_page(page);
out_release_nounlock:
+ restore_reserve_on_error(h, dst_vma, dst_addr, page);
put_page(page);
goto out;
}
@@ -5846,6 +5928,21 @@ unlock:
return ret;
}
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+ int ret = 0;
+
+ *hugetlb = false;
+ spin_lock_irq(&hugetlb_lock);
+ if (PageHeadHuge(page)) {
+ *hugetlb = true;
+ if (HPageFreed(page) || HPageMigratable(page))
+ ret = get_page_unless_zero(page);
+ }
+ spin_unlock_irq(&hugetlb_lock);
+ return ret;
+}
+
void putback_active_hugepage(struct page *page)
{
spin_lock_irq(&hugetlb_lock);
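The copy_hugetlb_page_range() and hugetlb_mcopy_atomic_pte() hunks above illustrate the intended contract: any error path that drops a page obtained from alloc_huge_page() calls restore_reserve_on_error() first, so that free_huge_page() later sees a reserve map consistent with the global counts. A condensed sketch of that pattern (the failure condition is hypothetical):

	page = alloc_huge_page(vma, addr, 0);
	if (IS_ERR(page))
		return -ENOMEM;
	if (copy_step_failed) {				/* hypothetical error */
		restore_reserve_on_error(h, vma, addr, page);
		put_page(page);
		return -EFAULT;
	}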
diff --git a/mm/internal.h b/mm/internal.h
index 54bd0dc2c23c..e8fdb531f887 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -96,26 +96,6 @@ static inline void set_page_refcounted(struct page *page)
set_page_count(page, 1);
}
-/*
- * When kernel touch the user page, the user page may be have been marked
- * poison but still mapped in user space, if without this page, the kernel
- * can guarantee the data integrity and operation success, the kernel is
- * better to check the posion status and avoid touching it, be good not to
- * panic, coredump for process fatal signal is a sample case matching this
- * scenario. Or if kernel can't guarantee the data integrity, it's better
- * not to call this function, let kernel touch the poison page and get to
- * panic.
- */
-static inline bool is_page_poisoned(struct page *page)
-{
- if (PageHWPoison(page))
- return true;
- else if (PageHuge(page) && PageHWPoison(compound_head(page)))
- return true;
-
- return false;
-}
-
extern unsigned long highest_memmap_pfn;
/*
@@ -404,27 +384,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
*/
static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
{
- pgoff_t pgoff = page_to_pgoff(page);
- return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ pgoff_t pgoff;
+ unsigned long address;
+
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ pgoff = page_to_pgoff(page);
+ if (pgoff >= vma->vm_pgoff) {
+ address = vma->vm_start +
+ ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ /* Check for address beyond vma (or wrapped through 0?) */
+ if (address < vma->vm_start || address >= vma->vm_end)
+ address = -EFAULT;
+ } else if (PageHead(page) &&
+ pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+ /* Test above avoids possibility of wrap to 0 on 32-bit */
+ address = vma->vm_start;
+ } else {
+ address = -EFAULT;
+ }
+ return address;
}
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
{
- unsigned long start, end;
-
- start = __vma_address(page, vma);
- end = start + thp_size(page) - PAGE_SIZE;
-
- /* page should be within @vma mapping range */
- VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
- return max(start, vma->vm_start);
+ pgoff_t pgoff;
+ unsigned long address;
+
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ pgoff = page_to_pgoff(page) + compound_nr(page);
+ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ /* Check for address beyond vma (or wrapped through 0?) */
+ if (address < vma->vm_start || address > vma->vm_end)
+ address = vma->vm_end;
+ return address;
}
static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
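With the -EFAULT sentinel replacing the old VM_BUG_ON, callers are expected to check the return value before using it; a minimal sketch of the caller pattern (mirroring page_mapped_in_vma() later in this series):

	unsigned long start, end;

	start = vma_address(page, vma);
	if (start == -EFAULT)
		return 0;			/* page is not mapped in this vma at all */
	end = vma_address_end(page, vma);	/* first address past the page within this vma */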
diff --git a/mm/ioremap.c b/mm/ioremap.c
index d1dcc7e744ac..8ee0136f8cb0 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -16,16 +16,16 @@
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static bool __ro_after_init iomap_max_page_shift = PAGE_SHIFT;
+static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;
static int __init set_nohugeiomap(char *str)
{
- iomap_max_page_shift = P4D_SHIFT;
+ iomap_max_page_shift = PAGE_SHIFT;
return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-static const bool iomap_max_page_shift = PAGE_SHIFT;
+static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
int ioremap_page_range(unsigned long addr,
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index c4605ac9837b..348f31d15a97 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
/**
* kasan_populate_early_shadow - populate shadow memory region with
* kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
*/
int __ref kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index e18fbbd5d9b4..4d21ac44d5d3 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
* During low activity with no allocations we might wait a
* while; let's avoid the hung task warning.
*/
- wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
- sysctl_hung_task_timeout_secs * HZ / 2);
+ wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+ sysctl_hung_task_timeout_secs * HZ / 2);
} else {
- wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+ wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
}
/* Disable static key and reset timer. */
diff --git a/mm/ksm.c b/mm/ksm.c
index 6bbe314c5260..2f3aaeb34a42 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -776,11 +776,12 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
struct page *page;
stable_node = rmap_item->head;
- page = get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
+ page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
if (!page)
goto out;
hlist_del(&rmap_item->hlist);
+ unlock_page(page);
put_page(page);
if (!hlist_empty(&stable_node->hlist))
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 85ad98c00fd9..6f5f78885ab4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -658,6 +658,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
*/
static int me_kernel(struct page *p, unsigned long pfn)
{
+ unlock_page(p);
return MF_IGNORED;
}
@@ -667,6 +668,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
static int me_unknown(struct page *p, unsigned long pfn)
{
pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
+ unlock_page(p);
return MF_FAILED;
}
@@ -675,6 +677,7 @@ static int me_unknown(struct page *p, unsigned long pfn)
*/
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
+ int ret;
struct address_space *mapping;
delete_from_lru_cache(p);
@@ -683,8 +686,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
* For anonymous pages we're done the only reference left
* should be the one m_f() holds.
*/
- if (PageAnon(p))
- return MF_RECOVERED;
+ if (PageAnon(p)) {
+ ret = MF_RECOVERED;
+ goto out;
+ }
/*
* Now truncate the page in the page cache. This is really
@@ -698,7 +703,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
/*
* Page has been teared down in the meanwhile
*/
- return MF_FAILED;
+ ret = MF_FAILED;
+ goto out;
}
/*
@@ -706,7 +712,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
*
* Open: to take i_mutex or not for this? Right now we don't.
*/
- return truncate_error_page(p, pfn, mapping);
+ ret = truncate_error_page(p, pfn, mapping);
+out:
+ unlock_page(p);
+ return ret;
}
/*
@@ -782,24 +791,26 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
*/
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
+ int ret;
+
ClearPageDirty(p);
/* Trigger EIO in shmem: */
ClearPageUptodate(p);
- if (!delete_from_lru_cache(p))
- return MF_DELAYED;
- else
- return MF_FAILED;
+ ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+ unlock_page(p);
+ return ret;
}
static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
+ int ret;
+
delete_from_swap_cache(p);
- if (!delete_from_lru_cache(p))
- return MF_RECOVERED;
- else
- return MF_FAILED;
+ ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+ unlock_page(p);
+ return ret;
}
/*
@@ -820,6 +831,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
mapping = page_mapping(hpage);
if (mapping) {
res = truncate_error_page(hpage, pfn, mapping);
+ unlock_page(hpage);
} else {
res = MF_FAILED;
unlock_page(hpage);
@@ -834,7 +846,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
page_ref_inc(p);
res = MF_RECOVERED;
}
- lock_page(hpage);
}
return res;
@@ -866,6 +877,8 @@ static struct page_state {
unsigned long mask;
unsigned long res;
enum mf_action_page_type type;
+
+ /* Callback ->action() has to unlock the relevant page inside it. */
int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
{ reserved, reserved, MF_MSG_KERNEL, me_kernel },
@@ -929,6 +942,7 @@ static int page_action(struct page_state *ps, struct page *p,
int result;
int count;
+ /* page p should be unlocked after returning from ps->action(). */
result = ps->action(p, pfn);
count = page_count(p) - 1;
@@ -949,6 +963,17 @@ static int page_action(struct page_state *ps, struct page *p,
return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}
+/*
+ * Return true if the page type of the given page is supported by the
+ * hwpoison mechanism (handling may still fail), otherwise false. This
+ * function does not return true for hugetlb or device memory pages, so it
+ * is assumed to be called only in contexts where such pages never occur.
+ */
+static inline bool HWPoisonHandlable(struct page *page)
+{
+ return PageLRU(page) || __PageMovable(page);
+}
+
/**
* __get_hwpoison_page() - Get refcount for memory error handling:
* @page: raw error page (hit by memory error)
@@ -959,8 +984,22 @@ static int page_action(struct page_state *ps, struct page *p,
static int __get_hwpoison_page(struct page *page)
{
struct page *head = compound_head(page);
+ int ret = 0;
+ bool hugetlb = false;
+
+ ret = get_hwpoison_huge_page(head, &hugetlb);
+ if (hugetlb)
+ return ret;
+
+ /*
+	 * This check prevents calling get_hwpoison_unless_zero()
+	 * for any unsupported page type, in order to reduce the risk of
+ * unexpected races caused by taking a page refcount.
+ */
+ if (!HWPoisonHandlable(head))
+ return 0;
- if (!PageHuge(head) && PageTransHuge(head)) {
+ if (PageTransHuge(head)) {
/*
* Non anonymous thp exists only in allocation/free time. We
* can't handle such a case correctly, so let's give it up.
@@ -1017,7 +1056,7 @@ try_again:
ret = -EIO;
}
} else {
- if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
+ if (PageHuge(p) || HWPoisonHandlable(p)) {
ret = 1;
} else {
/*
@@ -1228,7 +1267,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
if (TestSetPageHWPoison(head)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
- return 0;
+ return -EHWPOISON;
}
num_poisoned_pages_inc();
@@ -1288,7 +1327,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
goto out;
}
- res = identify_page_state(pfn, p, page_flags);
+ return identify_page_state(pfn, p, page_flags);
out:
unlock_page(head);
return res;
@@ -1404,9 +1443,10 @@ int memory_failure(unsigned long pfn, int flags)
struct page *hpage;
struct page *orig_head;
struct dev_pagemap *pgmap;
- int res;
+ int res = 0;
unsigned long page_flags;
bool retry = true;
+ static DEFINE_MUTEX(mf_mutex);
if (!sysctl_memory_failure_recovery)
panic("Memory failure on page %lx", pfn);
@@ -1424,13 +1464,19 @@ int memory_failure(unsigned long pfn, int flags)
return -ENXIO;
}
+ mutex_lock(&mf_mutex);
+
try_again:
- if (PageHuge(p))
- return memory_failure_hugetlb(pfn, flags);
+ if (PageHuge(p)) {
+ res = memory_failure_hugetlb(pfn, flags);
+ goto unlock_mutex;
+ }
+
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
- return 0;
+ res = -EHWPOISON;
+ goto unlock_mutex;
}
orig_head = hpage = compound_head(p);
@@ -1463,17 +1509,19 @@ try_again:
res = MF_FAILED;
}
action_result(pfn, MF_MSG_BUDDY, res);
- return res == MF_RECOVERED ? 0 : -EBUSY;
+ res = res == MF_RECOVERED ? 0 : -EBUSY;
} else {
action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
- return -EBUSY;
+ res = -EBUSY;
}
+ goto unlock_mutex;
}
if (PageTransHuge(hpage)) {
if (try_to_split_thp_page(p, "Memory Failure") < 0) {
action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
- return -EBUSY;
+ res = -EBUSY;
+ goto unlock_mutex;
}
VM_BUG_ON_PAGE(!page_count(p), p);
}
@@ -1497,7 +1545,7 @@ try_again:
if (PageCompound(p) && compound_head(p) != orig_head) {
action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
res = -EBUSY;
- goto out;
+ goto unlock_page;
}
/*
@@ -1517,17 +1565,22 @@ try_again:
num_poisoned_pages_dec();
unlock_page(p);
put_page(p);
- return 0;
+ goto unlock_mutex;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
num_poisoned_pages_dec();
unlock_page(p);
put_page(p);
- return 0;
+ goto unlock_mutex;
}
- if (!PageTransTail(p) && !PageLRU(p))
+ /*
+ * __munlock_pagevec may clear a writeback page's LRU flag without
+	 * page_lock. We need to wait for writeback completion for this page,
+	 * or it may trigger a vfs BUG while evicting the inode.
+ */
+ if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
goto identify_page_state;
/*
@@ -1543,7 +1596,7 @@ try_again:
if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
- goto out;
+ goto unlock_page;
}
/*
@@ -1552,13 +1605,17 @@ try_again:
if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
res = -EBUSY;
- goto out;
+ goto unlock_page;
}
identify_page_state:
res = identify_page_state(pfn, p, page_flags);
-out:
+ mutex_unlock(&mf_mutex);
+ return res;
+unlock_page:
unlock_page(p);
+unlock_mutex:
+ mutex_unlock(&mf_mutex);
return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
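Since every ->action() callback now owns the page unlock, a new handler would follow the shape below (me_example() is a hypothetical name; the MF_* return values are the existing ones):

	static int me_example(struct page *p, unsigned long pfn)
	{
		int ret = MF_IGNORED;

		/* ... inspect and handle the poisoned page ... */

		unlock_page(p);		/* page_action() no longer unlocks on our behalf */
		return ret;
	}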
diff --git a/mm/memory.c b/mm/memory.c
index 730daa00952b..486f4a2874e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1361,7 +1361,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
+ } else if (details && details->single_page &&
+ PageTransCompound(details->single_page) &&
+ next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
+ spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+ /*
+ * Take and drop THP pmd lock so that we cannot return
+ * prematurely, while zap_huge_pmd() has cleared *pmd,
+ * but not yet decremented compound_mapcount().
+ */
+ spin_unlock(ptl);
}
+
/*
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
@@ -2939,6 +2950,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
}
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
+ entry = pte_sw_mkyoung(entry);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/*
@@ -3236,6 +3248,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
}
/**
+ * unmap_mapping_page() - Unmap single page from processes.
+ * @page: The locked page to be unmapped.
+ *
+ * Unmap this page from any userspace process which still has it mmaped.
+ * Typically, for efficiency, the range of nearby pages has already been
+ * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
+ * truncation or invalidation holds the lock on a page, it may find that
+ * the page has been remapped again; it then uses unmap_mapping_page()
+ * to finally unmap it.
+ */
+void unmap_mapping_page(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct zap_details details = { };
+
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(PageTail(page));
+
+ details.check_mapping = mapping;
+ details.first_index = page->index;
+ details.last_index = page->index + thp_nr_pages(page) - 1;
+ details.single_page = page;
+
+ i_mmap_lock_write(mapping);
+ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+ unmap_mapping_range_tree(&mapping->i_mmap, &details);
+ i_mmap_unlock_write(mapping);
+}
+
+/**
* unmap_mapping_pages() - Unmap pages from processes.
* @mapping: The address space containing pages to be unmapped.
* @start: Index of first page to be unmapped.
@@ -3602,6 +3644,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
__SetPageUptodate(page);
entry = mk_pte(page, vma->vm_page_prot);
+ entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
@@ -3786,6 +3829,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
if (prefault && arch_wants_old_prefaulted_pte())
entry = pte_mkold(entry);
+ else
+ entry = pte_sw_mkyoung(entry);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
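A sketch of how truncation or invalidation is expected to use the new unmap_mapping_page() helper, assuming the caller already holds the page lock as the kernel-doc above requires (the surrounding truncate logic is elided):

	lock_page(page);
	if (page_mapped(page))
		unmap_mapping_page(page);	/* unmap just this page, under its lock */
	/* ... proceed with truncating or invalidating the page ... */
	unlock_page(page);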
diff --git a/mm/migrate.c b/mm/migrate.c
index b234c3f3acb7..41ff2c9896c4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -295,6 +295,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
goto out;
page = migration_entry_to_page(entry);
+ page = compound_head(page);
/*
* Once page cache replacement of page migration started, page_count
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aaa1655cf682..04220581579c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5053,9 +5053,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
*/
- while (page_array && page_array[nr_populated] && nr_populated < nr_pages)
+ while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* Already populated array? */
+ if (unlikely(page_array && nr_pages - nr_populated == 0))
+ return nr_populated;
+
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
@@ -9158,6 +9162,8 @@ bool take_page_off_buddy(struct page *page)
del_page_from_free_list(page_head, zone, page_order);
break_down_buddy_pages(zone, page_head, page, 0,
page_order, migratetype);
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, -1, migratetype);
ret = true;
break;
}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 2cf01d933f13..a4435311754b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -116,6 +116,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
return pfn_is_match(pvmw->page, pfn);
}
+static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
+{
+ pvmw->address = (pvmw->address + size) & ~(size - 1);
+ if (!pvmw->address)
+ pvmw->address = ULONG_MAX;
+}
+
/**
* page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
* @pvmw->address
@@ -144,6 +151,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
struct mm_struct *mm = pvmw->vma->vm_mm;
struct page *page = pvmw->page;
+ unsigned long end;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
@@ -153,10 +161,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (pvmw->pmd && !pvmw->pte)
return not_found(pvmw);
- if (pvmw->pte)
- goto next_pte;
+ if (unlikely(PageHuge(page))) {
+ /* The only possible mapping was handled on last iteration */
+ if (pvmw->pte)
+ return not_found(pvmw);
- if (unlikely(PageHuge(pvmw->page))) {
/* when pud is not present, pte will be NULL */
pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
if (!pvmw->pte)
@@ -168,78 +177,108 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return not_found(pvmw);
return true;
}
-restart:
- pgd = pgd_offset(mm, pvmw->address);
- if (!pgd_present(*pgd))
- return false;
- p4d = p4d_offset(pgd, pvmw->address);
- if (!p4d_present(*p4d))
- return false;
- pud = pud_offset(p4d, pvmw->address);
- if (!pud_present(*pud))
- return false;
- pvmw->pmd = pmd_offset(pud, pvmw->address);
+
/*
- * Make sure the pmd value isn't cached in a register by the
- * compiler and used as a stale value after we've observed a
- * subsequent update.
+	 * Seeking to the next pte only makes sense for THP.
+	 * More important than that optimization is filtering out
+	 * any PageKsm page, whose page->index misleads vma_address()
+	 * and vma_address_end() to disaster.
*/
- pmde = READ_ONCE(*pvmw->pmd);
- if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
- pvmw->ptl = pmd_lock(mm, pvmw->pmd);
- if (likely(pmd_trans_huge(*pvmw->pmd))) {
- if (pvmw->flags & PVMW_MIGRATION)
- return not_found(pvmw);
- if (pmd_page(*pvmw->pmd) != page)
- return not_found(pvmw);
- return true;
- } else if (!pmd_present(*pvmw->pmd)) {
- if (thp_migration_supported()) {
- if (!(pvmw->flags & PVMW_MIGRATION))
+ end = PageTransCompound(page) ?
+ vma_address_end(page, pvmw->vma) :
+ pvmw->address + PAGE_SIZE;
+ if (pvmw->pte)
+ goto next_pte;
+restart:
+ do {
+ pgd = pgd_offset(mm, pvmw->address);
+ if (!pgd_present(*pgd)) {
+ step_forward(pvmw, PGDIR_SIZE);
+ continue;
+ }
+ p4d = p4d_offset(pgd, pvmw->address);
+ if (!p4d_present(*p4d)) {
+ step_forward(pvmw, P4D_SIZE);
+ continue;
+ }
+ pud = pud_offset(p4d, pvmw->address);
+ if (!pud_present(*pud)) {
+ step_forward(pvmw, PUD_SIZE);
+ continue;
+ }
+
+ pvmw->pmd = pmd_offset(pud, pvmw->address);
+ /*
+ * Make sure the pmd value isn't cached in a register by the
+ * compiler and used as a stale value after we've observed a
+ * subsequent update.
+ */
+ pmde = READ_ONCE(*pvmw->pmd);
+
+ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+ pmde = *pvmw->pmd;
+ if (likely(pmd_trans_huge(pmde))) {
+ if (pvmw->flags & PVMW_MIGRATION)
return not_found(pvmw);
- if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
- swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
+ if (pmd_page(pmde) != page)
+ return not_found(pvmw);
+ return true;
+ }
+ if (!pmd_present(pmde)) {
+ swp_entry_t entry;
- if (migration_entry_to_page(entry) != page)
- return not_found(pvmw);
- return true;
- }
+ if (!thp_migration_supported() ||
+ !(pvmw->flags & PVMW_MIGRATION))
+ return not_found(pvmw);
+ entry = pmd_to_swp_entry(pmde);
+ if (!is_migration_entry(entry) ||
+ migration_entry_to_page(entry) != page)
+ return not_found(pvmw);
+ return true;
}
- return not_found(pvmw);
- } else {
/* THP pmd was split under us: handle on pte level */
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
+ } else if (!pmd_present(pmde)) {
+ /*
+ * If PVMW_SYNC, take and drop THP pmd lock so that we
+ * cannot return prematurely, while zap_huge_pmd() has
+ * cleared *pmd but not decremented compound_mapcount().
+ */
+ if ((pvmw->flags & PVMW_SYNC) &&
+ PageTransCompound(page)) {
+ spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+ spin_unlock(ptl);
+ }
+ step_forward(pvmw, PMD_SIZE);
+ continue;
}
- } else if (!pmd_present(pmde)) {
- return false;
- }
- if (!map_pte(pvmw))
- goto next_pte;
- while (1) {
+ if (!map_pte(pvmw))
+ goto next_pte;
+this_pte:
if (check_pte(pvmw))
return true;
next_pte:
- /* Seek to next pte only makes sense for THP */
- if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
- return not_found(pvmw);
do {
pvmw->address += PAGE_SIZE;
- if (pvmw->address >= pvmw->vma->vm_end ||
- pvmw->address >=
- __vma_address(pvmw->page, pvmw->vma) +
- thp_size(pvmw->page))
+ if (pvmw->address >= end)
return not_found(pvmw);
/* Did we cross page table boundary? */
- if (pvmw->address % PMD_SIZE == 0) {
- pte_unmap(pvmw->pte);
+ if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
if (pvmw->ptl) {
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
}
+ pte_unmap(pvmw->pte);
+ pvmw->pte = NULL;
goto restart;
- } else {
- pvmw->pte++;
+ }
+ pvmw->pte++;
+ if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
+ pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+ spin_lock(pvmw->ptl);
}
} while (pte_none(*pvmw->pte));
@@ -247,7 +286,10 @@ next_pte:
pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
spin_lock(pvmw->ptl);
}
- }
+ goto this_pte;
+ } while (pvmw->address < end);
+
+ return false;
}
/**
@@ -266,14 +308,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
.vma = vma,
.flags = PVMW_SYNC,
};
- unsigned long start, end;
-
- start = __vma_address(page, vma);
- end = start + thp_size(page) - PAGE_SIZE;
- if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+ pvmw.address = vma_address(page, vma);
+ if (pvmw.address == -EFAULT)
return 0;
- pvmw.address = max(start, vma->vm_start);
if (!page_vma_mapped_walk(&pvmw))
return 0;
page_vma_mapped_walk_done(&pvmw);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index c2210e1cdb51..4e640baf9794 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
{
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(!pmd_present(*pmdp));
- /* Below assumes pmd_present() is true */
- VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+ VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+ !pmd_devmap(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
diff --git a/mm/rmap.c b/mm/rmap.c
index 693a610e181d..e05c300048e6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
- unsigned long address;
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
@@ -717,15 +716,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
if (!vma->anon_vma || !page__anon_vma ||
vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
- } else if (page->mapping) {
- if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
- return -EFAULT;
- } else
+ } else if (!vma->vm_file) {
return -EFAULT;
- address = __vma_address(page, vma);
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
return -EFAULT;
- return address;
+ }
+
+ return vma_address(page, vma);
}
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, vma, vma->vm_mm, address,
- min(vma->vm_end, address + page_size(page)));
+ vma_address_end(page, vma));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
@@ -1405,6 +1402,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_unmap() may return false when it is about to become true,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ if (flags & TTU_SYNC)
+ pvmw.flags = PVMW_SYNC;
+
/* munlock has nothing to gain from examining un-locked vmas */
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
return true;
@@ -1426,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Note that the page can not be free in this function as call of
* try_to_unmap() must hold a reference on the page.
*/
+ range.end = PageKsm(page) ?
+ address + PAGE_SIZE : vma_address_end(page, vma);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
- address,
- min(vma->vm_end, address + page_size(page)));
+ address, range.end);
if (PageHuge(page)) {
/*
* If sharing is possible, start and end will be adjusted
@@ -1777,7 +1784,13 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
else
rmap_walk(page, &rwc);
- return !page_mapcount(page) ? true : false;
+ /*
+ * When racing against e.g. zap_pte_range() on another cpu,
+ * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * try_to_unmap() may return false when it is about to become true,
+ * if page table locking is skipped: use TTU_SYNC to wait for that.
+ */
+ return !page_mapcount(page);
}
/**
@@ -1874,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
+ VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
@@ -1928,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);
+ VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
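With try_to_unmap() now reporting a best-effort snapshot of page_mapped(), callers that must not race pass TTU_SYNC, as the unmap_page() hunk in mm/huge_memory.c above already does; the pattern in short:

	try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_SYNC | TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD);
	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);	/* meaningful because TTU_SYNC waited */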
diff --git a/mm/shmem.c b/mm/shmem.c
index a08cedefbfaa..5d46611cba8d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2258,25 +2258,11 @@ out_nomem:
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct shmem_inode_info *info = SHMEM_I(file_inode(file));
+ int ret;
- if (info->seals & F_SEAL_FUTURE_WRITE) {
- /*
- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * "future write" seal active.
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return -EPERM;
-
- /*
- * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
- * MAP_SHARED and read-only, take care to not allow mprotect to
- * revert protections on such mappings. Do this only for shared
- * mappings. For private mappings, don't need to mask
- * VM_MAYWRITE as we still want them to be COW-writable.
- */
- if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~(VM_MAYWRITE);
- }
+ ret = seal_check_future_write(info->seals, vma);
+ if (ret)
+ return ret;
/* arm64 - allow memory tagging on RAM-based files */
vma->vm_flags |= VM_MTE_ALLOWED;
@@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
pgoff_t offset, max_off;
ret = -ENOMEM;
- if (!shmem_inode_acct_block(inode, 1))
+ if (!shmem_inode_acct_block(inode, 1)) {
+ /*
+ * We may have got a page, returned -ENOENT triggering a retry,
+ * and now we find ourselves with -ENOMEM. Release the page, to
+ * avoid a BUG_ON in our caller.
+ */
+ if (unlikely(*pagep)) {
+ put_page(*pagep);
+ *pagep = NULL;
+ }
goto out;
+ }
if (!*pagep) {
page = shmem_alloc_page(gfp, info, pgoff);
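The F_SEAL_FUTURE_WRITE logic removed above is consolidated into a shared helper that is not part of this hunk; assuming it simply factors out that logic as an inline (in include/linux/mm.h), it would look roughly like:

	static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
	{
		if (seals & F_SEAL_FUTURE_WRITE) {
			/* New PROT_WRITE, MAP_SHARED mappings are not allowed. */
			if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
				return -EPERM;
			/* Keep read-only MAP_SHARED mappings from regaining write via mprotect. */
			if (vma->vm_flags & VM_SHARED)
				vma->vm_flags &= ~(VM_MAYWRITE);
		}
		return 0;
	}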
diff --git a/mm/shuffle.h b/mm/shuffle.h
index 71b784f0b7c3..cec62984f7d3 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -10,7 +10,7 @@
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
extern void __shuffle_free_memory(pg_data_t *pgdat);
extern bool shuffle_pick_tail(void);
-static inline void shuffle_free_memory(pg_data_t *pgdat)
+static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)
{
if (!static_branch_unlikely(&page_alloc_shuffle_key))
return;
@@ -18,7 +18,7 @@ static inline void shuffle_free_memory(pg_data_t *pgdat)
}
extern void __shuffle_zone(struct zone *z);
-static inline void shuffle_zone(struct zone *z)
+static inline void __meminit shuffle_zone(struct zone *z)
{
if (!static_branch_unlikely(&page_alloc_shuffle_key))
return;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f8833d3e5d47..7cab77655f11 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -97,8 +97,7 @@ EXPORT_SYMBOL(kmem_cache_size);
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
- if (!name || in_interrupt() || size < sizeof(void *) ||
- size > KMALLOC_MAX_SIZE) {
+ if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
pr_err("kmem_cache_create(%s) integrity check failed\n", name);
return -EINVAL;
}
@@ -318,6 +317,16 @@ kmem_cache_create_usercopy(const char *name,
const char *cache_name;
int err;
+#ifdef CONFIG_SLUB_DEBUG
+ /*
+ * If no slub_debug was enabled globally, the static key is not yet
+ * enabled by setup_slub_debug(). Enable it if the cache is being
+ * created with any of the debugging flags passed explicitly.
+ */
+ if (flags & SLAB_DEBUG_FLAGS)
+ static_branch_enable(&slub_debug_enabled);
+#endif
+
mutex_lock(&slab_mutex);
err = kmem_cache_sanity_check(name, size);
diff --git a/mm/slub.c b/mm/slub.c
index feda53ae62ba..61bd40e3eb9a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
+#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
@@ -301,6 +302,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
if (!debug_pagealloc_enabled_static())
return get_freepointer(s, object);
+ object = kasan_reset_tag(object);
freepointer_addr = (unsigned long)object + s->offset;
copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
return freelist_ptr(s, p, freepointer_addr);
@@ -711,15 +713,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
- print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
s->red_left_pad);
else if (p > addr + 16)
print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
- print_section(KERN_ERR, "Object ", p,
+ print_section(KERN_ERR, "Object ", p,
min_t(unsigned int, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section(KERN_ERR, "Redzone ", p + s->object_size,
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
off = get_info_end(s);
@@ -731,7 +733,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section(KERN_ERR, "Padding ", p + off,
+ print_section(KERN_ERR, "Padding ", p + off,
size_from_object(s) - off);
dump_stack();
@@ -908,11 +910,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
u8 *endobject = object + s->object_size;
if (s->flags & SLAB_RED_ZONE) {
- if (!check_bytes_and_report(s, page, object, "Redzone",
+ if (!check_bytes_and_report(s, page, object, "Left Redzone",
object - s->red_left_pad, val, s->red_left_pad))
return 0;
- if (!check_bytes_and_report(s, page, object, "Redzone",
+ if (!check_bytes_and_report(s, page, object, "Right Redzone",
endobject, val, s->inuse - s->object_size))
return 0;
} else {
@@ -927,7 +929,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->object_size - 1) ||
- !check_bytes_and_report(s, page, p, "Poison",
+ !check_bytes_and_report(s, page, p, "End Poison",
p + s->object_size - 1, POISON_END, 1)))
return 0;
/*
@@ -3688,7 +3690,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
- unsigned int freepointer_area;
unsigned int order;
/*
@@ -3697,13 +3698,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* the possible location of the free pointer.
*/
size = ALIGN(size, sizeof(void *));
- /*
- * This is the area of the object where a freepointer can be
- * safely written. If redzoning adds more to the inuse size, we
- * can't use that portion for writing the freepointer, so
- * s->offset must be limited within this for the general case.
- */
- freepointer_area = size;
#ifdef CONFIG_SLUB_DEBUG
/*
@@ -3729,19 +3723,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
/*
* With that we have determined the number of bytes in actual use
- * by the object. This is the potential offset to the free pointer.
+ * by the object and redzoning.
*/
s->inuse = size;
- if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
- s->ctor)) {
+ if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+ ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
+ s->ctor) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
* kmem_cache_free.
*
* This is the case if we do RCU, have a constructor or
- * destructor or are poisoning the objects.
+ * destructor, are poisoning the objects, or are
+ * redzoning an object smaller than sizeof(void *).
*
* The assumption that s->offset >= s->inuse means free
* pointer is outside of the object is used in the
@@ -3750,13 +3746,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
s->offset = size;
size += sizeof(void *);
- } else if (freepointer_area > sizeof(void *)) {
+ } else {
/*
* Store freelist pointer near middle of object to keep
* it away from the edges of the object to avoid small
* sized over/underflows from neighboring allocations.
*/
- s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+ s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
}
#ifdef CONFIG_SLUB_DEBUG
@@ -3828,15 +3824,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
-#ifdef CONFIG_SLUB_DEBUG
- /*
- * If no slub_debug was enabled globally, the static key is not yet
- * enabled by setup_slub_debug(). Enable it if the cache is being
- * created with any of the debugging flags passed explicitly.
- */
- if (flags & SLAB_DEBUG_FLAGS)
- static_branch_enable(&slub_debug_enabled);
-#endif
s->flags = kmem_cache_flags(s->size, flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
s->random = get_random_long();
diff --git a/mm/sparse.c b/mm/sparse.c
index b2ada9dc00cb..55c18aff3e42 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -344,6 +344,15 @@ size_t mem_section_usage_size(void)
return sizeof(struct mem_section_usage) + usemap_size();
}
+static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
+{
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ return __pa_symbol(pgdat);
+#else
+ return __pa(pgdat);
+#endif
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
@@ -362,7 +371,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
* from the same section as the pgdat where possible to avoid
* this problem.
*/
- goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+ goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
@@ -390,7 +399,7 @@ static void __init check_usemap_section_nr(int nid,
}
usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
- pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+ pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
if (usemap_snr == pgdat_snr)
return;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 149e77454e3c..996afa8131c8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
- return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
+ return pte_same(pte_swp_clear_flags(pte), swp_pte);
}
/*
diff --git a/mm/truncate.c b/mm/truncate.c
index 95af244b112a..234ddd879caa 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -167,13 +167,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/
-static void
-truncate_cleanup_page(struct address_space *mapping, struct page *page)
+static void truncate_cleanup_page(struct page *page)
{
- if (page_mapped(page)) {
- unsigned int nr = thp_nr_pages(page);
- unmap_mapping_pages(mapping, page->index, nr, false);
- }
+ if (page_mapped(page))
+ unmap_mapping_page(page);
if (page_has_private(page))
do_invalidatepage(page, 0, thp_size(page));
@@ -218,7 +215,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return -EIO;
- truncate_cleanup_page(mapping, page);
+ truncate_cleanup_page(page);
delete_from_page_cache(page);
return 0;
}
@@ -325,7 +322,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
index = indices[pagevec_count(&pvec) - 1] + 1;
truncate_exceptional_pvec_entries(mapping, &pvec, indices);
for (i = 0; i < pagevec_count(&pvec); i++)
- truncate_cleanup_page(mapping, pvec.pages[i]);
+ truncate_cleanup_page(pvec.pages[i]);
delete_from_page_cache_batch(mapping, &pvec);
for (i = 0; i < pagevec_count(&pvec); i++)
unlock_page(pvec.pages[i]);
@@ -639,6 +636,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
+ if (!did_range_unmap && page_mapped(page)) {
+ /*
+ * If page is mapped, before taking its lock,
+ * zap the rest of the file in one hit.
+ */
+ unmap_mapping_pages(mapping, index,
+ (1 + end - index), false);
+ did_range_unmap = 1;
+ }
+
lock_page(page);
WARN_ON(page_to_index(page) != index);
if (page->mapping != mapping) {
@@ -646,23 +653,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
continue;
}
wait_on_page_writeback(page);
- if (page_mapped(page)) {
- if (!did_range_unmap) {
- /*
- * Zap the rest of the file in one hit.
- */
- unmap_mapping_pages(mapping, index,
- (1 + end - index), false);
- did_range_unmap = 1;
- } else {
- /*
- * Just zap this page
- */
- unmap_mapping_pages(mapping, index,
- 1, false);
- }
- }
+
+ if (page_mapped(page))
+ unmap_mapping_page(page);
BUG_ON(page_mapped(page));
+
ret2 = do_launder_page(mapping, page);
if (ret2 == 0) {
if (!invalidate_complete_page2(mapping, page))
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e14b3820c6a8..63a73e164d55 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -360,38 +360,38 @@ out:
* If a reservation for the page existed in the reservation
* map of a private mapping, the map was modified to indicate
* the reservation was consumed when the page was allocated.
- * We clear the PagePrivate flag now so that the global
+ * We clear the HPageRestoreReserve flag now so that the global
* reserve count will not be incremented in free_huge_page.
* The reservation map will still indicate the reservation
* was consumed and possibly prevent later page allocation.
* This is better than leaking a global reservation. If no
- * reservation existed, it is still safe to clear PagePrivate
- * as no adjustments to reservation counts were made during
- * allocation.
+ * reservation existed, it is still safe to clear
+ * HPageRestoreReserve as no adjustments to reservation counts
+ * were made during allocation.
*
* The reservation map for shared mappings indicates which
* pages have reservations. When a huge page is allocated
* for an address with a reservation, no change is made to
- * the reserve map. In this case PagePrivate will be set
- * to indicate that the global reservation count should be
+ * the reserve map. In this case HPageRestoreReserve will be
+ * set to indicate that the global reservation count should be
* incremented when the page is freed. This is the desired
* behavior. However, when a huge page is allocated for an
* address without a reservation a reservation entry is added
- * to the reservation map, and PagePrivate will not be set.
- * When the page is freed, the global reserve count will NOT
- * be incremented and it will appear as though we have leaked
- * reserved page. In this case, set PagePrivate so that the
- * global reserve count will be incremented to match the
- * reservation map entry which was created.
+ * to the reservation map, and HPageRestoreReserve will not be
+ * set. When the page is freed, the global reserve count will
+ * NOT be incremented and it will appear as though we have
+ * leaked reserved page. In this case, set HPageRestoreReserve
+ * so that the global reserve count will be incremented to
+ * match the reservation map entry which was created.
*
* Note that vm_alloc_shared is based on the flags of the vma
* for which the page was originally allocated. dst_vma could
* be different or NULL on error.
*/
if (vm_alloc_shared)
- SetPagePrivate(page);
+ SetHPageRestoreReserve(page);
else
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
put_page(page);
}
BUG_ON(copied < 0);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a13ac524f6ff..d0a7d89be091 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2344,15 +2344,16 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
- unsigned long align, unsigned long flags, unsigned long start,
- unsigned long end, int node, gfp_t gfp_mask, const void *caller)
+ unsigned long align, unsigned long shift, unsigned long flags,
+ unsigned long start, unsigned long end, int node,
+ gfp_t gfp_mask, const void *caller)
{
struct vmap_area *va;
struct vm_struct *area;
unsigned long requested_size = size;
BUG_ON(in_interrupt());
- size = PAGE_ALIGN(size);
+ size = ALIGN(size, 1ul << shift);
if (unlikely(!size))
return NULL;
@@ -2384,8 +2385,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
const void *caller)
{
- return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
- GFP_KERNEL, caller);
+ return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
+ NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
@@ -2401,7 +2402,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
- return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+ VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
}
@@ -2409,7 +2411,8 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
- return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+ VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
}
@@ -2902,9 +2905,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
}
again:
- size = PAGE_ALIGN(size);
- area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
- vm_flags, start, end, node, gfp_mask, caller);
+ area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
+ VM_UNINITIALIZED | vm_flags, start, end, node,
+ gfp_mask, caller);
if (!area) {
warn_alloc(gfp_mask, NULL,
"vmalloc size %lu allocation failure: "
@@ -2923,6 +2926,7 @@ again:
*/
clear_vm_uninitialized_flag(area);
+ size = PAGE_ALIGN(size);
kmemleak_vmalloc(area, size, gfp_mask);
return addr;
@@ -2999,6 +3003,23 @@ void *vmalloc(unsigned long size)
EXPORT_SYMBOL(vmalloc);
/**
+ * vmalloc_no_huge - allocate virtually contiguous memory using small pages
+ * @size: allocation size
+ *
+ * Allocate enough non-huge pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+void *vmalloc_no_huge(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+ NUMA_NO_NODE, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(vmalloc_no_huge);
+
+/**
* vzalloc - allocate virtually contiguous memory with zero fill
* @size: allocation size
*
diff --git a/net/Kconfig b/net/Kconfig
index f5ee7c65e6b4..c7392c449b25 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -302,21 +302,6 @@ config BQL
select DQL
default y
-config BPF_JIT
- bool "enable BPF Just In Time compiler"
- depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
- depends on MODULES
- help
- Berkeley Packet Filter filtering capabilities are normally handled
- by an interpreter. This option allows kernel to generate a native
- code when filter is loaded in memory. This should speedup
- packet sniffing (libpcap/tcpdump).
-
- Note, admin should enable this feature changing:
- /proc/sys/net/core/bpf_jit_enable
- /proc/sys/net/core/bpf_jit_harden (optional)
- /proc/sys/net/core/bpf_jit_kallsyms (optional)
-
config BPF_STREAM_PARSER
bool "enable BPF STREAM_PARSER"
depends on INET
@@ -470,15 +455,3 @@ config ETHTOOL_NETLINK
e.g. notification messages.
endif # if NET
-
-# Used by archs to tell that they support BPF JIT compiler plus which flavour.
-# Only one of the two can be selected for a specific arch since eBPF JIT supersedes
-# the cBPF JIT.
-
-# Classic BPF JIT (cBPF)
-config HAVE_CBPF_JIT
- bool
-
-# Extended BPF JIT (eBPF)
-config HAVE_EBPF_JIT
- bool
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index be18af481d7d..c7236daa2415 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -768,7 +768,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
if (a && a->status & ATIF_PROBE) {
a->status |= ATIF_PROBE_FAIL;
/*
- * we do not respond to probe or request packets for
+ * we do not respond to probe or request packets of
* this address while we are probing this address
*/
goto unlock;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 789f257be24f..fc8be49010b9 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
if (WARN_ON(!forw_packet->if_outgoing))
return;
- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
+ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
return;
+ }
if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
return;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index fd12f1652bdf..7d71d104fdfd 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1610,8 +1610,13 @@ setup_failed:
} else {
/* Init failed, cleanup */
flush_work(&hdev->tx_work);
- flush_work(&hdev->cmd_work);
+
+ /* Since hci_rx_work() is possible to awake new cmd_work
+ * it should be flushed first to avoid unexpected call of
+ * hci_cmd_work()
+ */
flush_work(&hdev->rx_work);
+ flush_work(&hdev->cmd_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 251b9128f530..eed0dd066e12 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
sk_for_each(sk, &hci_sk_list.head) {
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
sk->sk_err = EPIPE;
@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
hci_dev_put(hdev);
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
read_unlock(&hci_sk_list.lock);
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 372e3b25aaa4..7dd51da73845 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3229,7 +3229,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
{
struct l2cap_chan *chan;
- bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan);
+ BT_DBG("pchan %p", pchan);
chan = l2cap_chan_create();
if (!chan)
@@ -3250,7 +3250,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
*/
atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
- bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan);
+ BT_DBG("created chan %p", chan);
return chan;
}
@@ -3354,7 +3354,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
{
struct smp_dev *smp;
- bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan);
+ BT_DBG("chan %p", chan);
smp = chan->data;
if (smp) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 7ce8a77cc6b6..e013d33f1c7c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
#endif
struct br_tunnel_info {
- __be64 tunnel_id;
- struct metadata_dst *tunnel_dst;
+ __be64 tunnel_id;
+ struct metadata_dst __rcu *tunnel_dst;
};
/* private vlan flags */
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index 0d3a8c01552e..01017448ebde 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
br_vlan_tunnel_rht_params);
}
+static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
+{
+ struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
+
+ WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
+ RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
+ dst_release(&tdst->dst);
+}
+
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
- if (!vlan->tinfo.tunnel_dst)
+ if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
return;
rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
- vlan->tinfo.tunnel_id = 0;
- dst_release(&vlan->tinfo.tunnel_dst->dst);
- vlan->tinfo.tunnel_dst = NULL;
+ vlan_tunnel_info_release(vlan);
}
static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan, u32 tun_id)
{
- struct metadata_dst *metadata = NULL;
+ struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
int err;
- if (vlan->tinfo.tunnel_dst)
+ if (metadata)
return -EEXIST;
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
return -EINVAL;
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
- vlan->tinfo.tunnel_dst = metadata;
- vlan->tinfo.tunnel_id = key;
+ rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
+ WRITE_ONCE(vlan->tinfo.tunnel_id, key);
err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
return 0;
out:
- dst_release(&vlan->tinfo.tunnel_dst->dst);
- vlan->tinfo.tunnel_dst = NULL;
- vlan->tinfo.tunnel_id = 0;
+ vlan_tunnel_info_release(vlan);
return err;
}
@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
+ struct metadata_dst *tunnel_dst;
+ __be64 tunnel_id;
int err;
- if (!vlan || !vlan->tinfo.tunnel_id)
+ if (!vlan)
return 0;
- if (unlikely(!skb_vlan_tag_present(skb)))
+ tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
+ if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
return 0;
skb_dst_drop(skb);
@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
if (err)
return err;
- skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+ tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
+ if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
+ skb_dst_set(skb, &tunnel_dst->dst);
return 0;
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index c10e5a55758d..440139706130 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
caifd_put(caifd);
}
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
struct cflayer *link_support, int head_room,
struct cflayer **layer,
int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
enum cfcnfg_phy_preference pref;
struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
struct caif_device_entry_list *caifdevs;
+ int res;
caifdevs = caif_device_list(dev_net(dev));
caifd = caif_device_alloc(dev);
if (!caifd)
- return;
+ return -ENOMEM;
*layer = &caifd->layer;
spin_lock_init(&caifd->flow_lock);
@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
strlcpy(caifd->layer.name, dev->name,
sizeof(caifd->layer.name));
caifd->layer.transmit = transmit;
- cfcnfg_add_phy_layer(cfg,
+ res = cfcnfg_add_phy_layer(cfg,
dev,
&caifd->layer,
pref,
@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
mutex_unlock(&caifdevs->lock);
if (rcv_func)
*rcv_func = receive;
+ return res;
}
EXPORT_SYMBOL(caif_enroll_dev);
@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
struct cflayer *layer, *link_support;
int head_room = 0;
struct caif_device_entry_list *caifdevs;
+ int res;
cfg = get_cfcnfg(dev_net(dev));
caifdevs = caif_device_list(dev_net(dev));
@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
break;
}
}
- caif_enroll_dev(dev, caifdev, link_support, head_room,
+ res = caif_enroll_dev(dev, caifdev, link_support, head_room,
&layer, NULL);
+ if (res)
+ cfserl_release(link_support);
caifdev->flowctrl = dev_flowctrl;
break;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index a0116b9503d9..b02e1292f7f1 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
return (struct cflayer *) this;
}
+static void cfusbl_release(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
static struct packet_type caif_usb_type __read_mostly = {
.type = cpu_to_be16(ETH_P_802_EX1),
};
@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
struct cflayer *layer, *link_support;
struct usbnet *usbnet;
struct usb_device *usbdev;
+ int res;
/* Check whether we have a NCM device, and find its VID/PID. */
if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
if (dev->num_tx_queues > 1)
pr_warn("USB device uses more than one tx queue\n");
- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
&layer, &caif_usb_type.func);
+ if (res)
+ goto err;
+
if (!pack_added)
dev_add_pack(&caif_usb_type);
pack_added = true;
@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
strlcpy(layer->name, dev->name, sizeof(layer->name));
return 0;
+err:
+ cfusbl_release(link_support);
+ return res;
}
static struct notifier_block caif_device_notifier = {
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 399239a14420..cac30e676ac9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -450,7 +450,7 @@ unlock:
rcu_read_unlock();
}
-void
+int
cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
{
struct cflayer *frml;
struct cfcnfg_phyinfo *phyinfo = NULL;
- int i;
+ int i, res = 0;
u8 phyid;
mutex_lock(&cnfg->lock);
@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
+ res = -EEXIST;
goto out;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
- if (!phyinfo)
+ if (!phyinfo) {
+ res = -ENOMEM;
goto out_err;
+ }
phy_layer->id = phyid;
phyinfo->pref = pref;
@@ -492,8 +495,10 @@ got_phyid:
frml = cffrml_create(phyid, fcs);
- if (!frml)
+ if (!frml) {
+ res = -ENOMEM;
goto out_err;
+ }
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
@@ -511,11 +516,12 @@ got_phyid:
list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
mutex_unlock(&cnfg->lock);
- return;
+ return res;
out_err:
kfree(phyinfo);
mutex_unlock(&cnfg->lock);
+ return res;
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index e11725a4bb0e..40cd57ad0a0f 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid);
+void cfserl_release(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
struct cflayer *cfserl_create(int instance, bool use_stx)
{
struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 909b9e684e04..f3e4d9528fa3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
struct sock sk;
int bound;
int ifindex;
- struct notifier_block notifier;
+ struct list_head notifier;
struct list_head rx_ops;
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
@@ -133,6 +133,10 @@ struct bcm_sock {
char procname [32]; /* inode number in decimal with \0 */
};
+static LIST_HEAD(bcm_notifier_list);
+static DEFINE_SPINLOCK(bcm_notifier_lock);
+static struct bcm_sock *bcm_busy_notifier;
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
if (!op->count && (op->flags & TX_COUNTEVT)) {
/* create notification to user */
+ memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = TX_EXPIRED;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
/* this element is not throttled anymore */
data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
head.flags = op->flags;
head.count = op->count;
@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
}
/* create notification to user */
+ memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/*
* notification handler for netdevice status changes
*/
-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
+static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
struct sock *sk = &bo->sk;
struct bcm_op *op;
int notify_enodev = 0;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
switch (msg) {
@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
sk->sk_error_report(sk);
}
}
+}
+static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+
+ spin_lock(&bcm_notifier_lock);
+ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
+ spin_unlock(&bcm_notifier_lock);
+ bcm_notify(bcm_busy_notifier, msg, dev);
+ spin_lock(&bcm_notifier_lock);
+ }
+ bcm_busy_notifier = NULL;
+ spin_unlock(&bcm_notifier_lock);
return NOTIFY_DONE;
}
@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
INIT_LIST_HEAD(&bo->rx_ops);
/* set notifier */
- bo->notifier.notifier_call = bcm_notifier;
-
- register_netdevice_notifier(&bo->notifier);
+ spin_lock(&bcm_notifier_lock);
+ list_add_tail(&bo->notifier, &bcm_notifier_list);
+ spin_unlock(&bcm_notifier_lock);
return 0;
}
@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
/* remove bcm_ops, timer, rx_unregister(), etc. */
- unregister_netdevice_notifier(&bo->notifier);
+ spin_lock(&bcm_notifier_lock);
+ while (bcm_busy_notifier == bo) {
+ spin_unlock(&bcm_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&bcm_notifier_lock);
+ }
+ list_del(&bo->notifier);
+ spin_unlock(&bcm_notifier_lock);
lock_sock(sk);
@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
.exit = canbcm_pernet_exit,
};
+static struct notifier_block canbcm_notifier = {
+ .notifier_call = bcm_notifier
+};
+
static int __init bcm_module_init(void)
{
int err;
@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
}
register_pernet_subsys(&canbcm_pernet_ops);
+ register_netdevice_notifier(&canbcm_notifier);
return 0;
}
static void __exit bcm_module_exit(void)
{
can_proto_unregister(&bcm_can_proto);
+ unregister_netdevice_notifier(&canbcm_notifier);
unregister_pernet_subsys(&canbcm_pernet_ops);
}
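For context on the bcm changes above: the added memset() calls zero the bcm_msg_head before it is copied to the receiving socket, so no uninitialized kernel stack bytes reach userspace, and the per-socket netdevice notifier is replaced by one module-wide notifier list. The header being zeroed is exactly what a CAN_BCM user reads back; a minimal userspace sketch of that consumer (not part of this series; it assumes an interface named "can0", an arbitrary example CAN ID 0x123, and the uapi header <linux/can/bcm.h>, with most error handling omitted) could look like:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

struct bcm_rx_msg {
    struct bcm_msg_head head;
    struct can_frame frame;
};

int main(void)
{
    struct sockaddr_can addr = { .can_family = AF_CAN };
    struct ifreq ifr;
    struct bcm_rx_msg msg;
    int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

    if (s < 0)
        return 1;
    strcpy(ifr.ifr_name, "can0");
    ioctl(s, SIOCGIFINDEX, &ifr);
    addr.can_ifindex = ifr.ifr_ifindex;
    connect(s, (struct sockaddr *)&addr, sizeof(addr));

    /* Ask for an RX_TIMEOUT notification if CAN ID 0x123 stays silent for 1s. */
    memset(&msg, 0, sizeof(msg));
    msg.head.opcode = RX_SETUP;
    msg.head.can_id = 0x123;
    msg.head.flags = SETTIMER | STARTTIMER | RX_FILTER_ID;
    msg.head.ival1.tv_sec = 1;
    write(s, &msg.head, sizeof(msg.head));

    /* The kernel answers with a bcm_msg_head (RX_TIMEOUT or RX_CHANGED),
     * i.e. the structure the patch now fully zero-initializes. */
    if (read(s, &msg, sizeof(msg)) >= (ssize_t)sizeof(msg.head))
        printf("opcode %u for CAN ID 0x%X\n", msg.head.opcode, msg.head.can_id);

    close(s);
    return 0;
}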
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 9f94ad3caee9..be6183f8ca11 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -143,10 +143,14 @@ struct isotp_sock {
u32 force_tx_stmin;
u32 force_rx_stmin;
struct tpcon rx, tx;
- struct notifier_block notifier;
+ struct list_head notifier;
wait_queue_head_t wait;
};
+static LIST_HEAD(isotp_notifier_list);
+static DEFINE_SPINLOCK(isotp_notifier_lock);
+static struct isotp_sock *isotp_busy_notifier;
+
static inline struct isotp_sock *isotp_sk(const struct sock *sk)
{
return (struct isotp_sock *)sk;
@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
- unregister_netdevice_notifier(&so->notifier);
+ spin_lock(&isotp_notifier_lock);
+ while (isotp_busy_notifier == so) {
+ spin_unlock(&isotp_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&isotp_notifier_lock);
+ }
+ list_del(&so->notifier);
+ spin_unlock(&isotp_notifier_lock);
lock_sock(sk);
@@ -1062,27 +1073,31 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
+ if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
+ return -EADDRNOTAVAIL;
+
+ if (!addr->can_ifindex)
+ return -ENODEV;
+
+ lock_sock(sk);
+
/* do not register frame reception for functional addressing */
if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
do_rx_reg = 0;
/* do not validate rx address for functional addressing */
if (do_rx_reg) {
- if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
- return -EADDRNOTAVAIL;
+ if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
- if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
- return -EADDRNOTAVAIL;
+ if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
}
- if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
- return -EADDRNOTAVAIL;
-
- if (!addr->can_ifindex)
- return -ENODEV;
-
- lock_sock(sk);
-
if (so->bound && addr->can_ifindex == so->ifindex &&
addr->can_addr.tp.rx_id == so->rxid &&
addr->can_addr.tp.tx_id == so->txid)
@@ -1164,16 +1179,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
return ISOTP_MIN_NAMELEN;
}
-static int isotp_setsockopt(struct socket *sock, int level, int optname,
+static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
int ret = 0;
- if (level != SOL_CAN_ISOTP)
- return -EINVAL;
-
if (so->bound)
return -EISCONN;
@@ -1248,6 +1260,22 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
return ret;
}
+static int isotp_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+
+{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ if (level != SOL_CAN_ISOTP)
+ return -EINVAL;
+
+ lock_sock(sk);
+ ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen);
+ release_sock(sk);
+ return ret;
+}
+
static int isotp_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -1300,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
+static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
struct sock *sk = &so->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
if (so->ifindex != dev->ifindex)
- return NOTIFY_DONE;
+ return;
switch (msg) {
case NETDEV_UNREGISTER:
@@ -1340,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
sk->sk_error_report(sk);
break;
}
+}
+
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+
+ spin_lock(&isotp_notifier_lock);
+ list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
+ spin_unlock(&isotp_notifier_lock);
+ isotp_notify(isotp_busy_notifier, msg, dev);
+ spin_lock(&isotp_notifier_lock);
+ }
+ isotp_busy_notifier = NULL;
+ spin_unlock(&isotp_notifier_lock);
return NOTIFY_DONE;
}
@@ -1377,8 +1421,9 @@ static int isotp_init(struct sock *sk)
init_waitqueue_head(&so->wait);
- so->notifier.notifier_call = isotp_notifier;
- register_netdevice_notifier(&so->notifier);
+ spin_lock(&isotp_notifier_lock);
+ list_add_tail(&so->notifier, &isotp_notifier_list);
+ spin_unlock(&isotp_notifier_lock);
return 0;
}
@@ -1425,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
.prot = &isotp_proto,
};
+static struct notifier_block canisotp_notifier = {
+ .notifier_call = isotp_notifier
+};
+
static __init int isotp_module_init(void)
{
int err;
@@ -1434,6 +1483,8 @@ static __init int isotp_module_init(void)
err = can_proto_register(&isotp_can_proto);
if (err < 0)
pr_err("can: registration of isotp protocol failed\n");
+ else
+ register_netdevice_notifier(&canisotp_notifier);
return err;
}
@@ -1441,6 +1492,7 @@ static __init int isotp_module_init(void)
static __exit void isotp_module_exit(void)
{
can_proto_unregister(&isotp_can_proto);
+ unregister_netdevice_notifier(&canisotp_notifier);
}
module_init(isotp_module_init);
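For context on the reworked isotp paths above: bind() now rejects bad addressing (missing ifindex, error/RTR flag bits in the IDs) before taking the socket lock, and setsockopt() is dispatched to the new locked helper. The fields being validated are the ones userspace fills in; a minimal sketch of that caller side (not from this series; it assumes an interface "can0", CONFIG_CAN_ISOTP, and arbitrary example IDs 0x7e0/0x7e8) might look like:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

int main(void)
{
    struct can_isotp_options opts = { .flags = CAN_ISOTP_TX_PADDING, .txpad_content = 0xAA };
    struct sockaddr_can addr = { .can_family = AF_CAN };
    struct ifreq ifr;
    int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

    if (s < 0)
        return 1;

    /* Handled by the new isotp_setsockopt_locked() path, under lock_sock(). */
    setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));

    strcpy(ifr.ifr_name, "can0");
    ioctl(s, SIOCGIFINDEX, &ifr);

    /* isotp_bind() now checks these up front: an ifindex must be given, and
     * tx_id (and rx_id, unless broadcast) must not carry CAN_ERR_FLAG or
     * CAN_RTR_FLAG bits, otherwise it returns EADDRNOTAVAIL. */
    addr.can_ifindex = ifr.ifr_ifindex;
    addr.can_addr.tp.tx_id = 0x7e0;   /* example request ID */
    addr.can_addr.tp.rx_id = 0x7e8;   /* example response ID */
    if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        return 1;
    }

    write(s, "\x01\x02\x03", 3);   /* one ISO-TP PDU; segmentation happens in-kernel */
    close(s);
    return 0;
}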
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index e09d087ba240..c3946c355882 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
if ((do_skcb->offset + do_skb->len) < offset_start) {
__skb_unlink(do_skb, &session->skb_queue);
+ /* drop ref taken in j1939_session_skb_queue() */
+ skb_unref(do_skb);
+
kfree_skb(do_skb);
}
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
skcb->flags |= J1939_ECU_LOCAL_SRC;
+ skb_get(skb);
skb_queue_tail(&session->skb_queue, skb);
}
static struct
-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
- unsigned int offset_start)
+sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+ unsigned int offset_start)
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *do_skcb;
@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
skb = do_skb;
}
}
+
+ if (skb)
+ skb_get(skb);
+
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
if (!skb)
@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
return skb;
}
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
{
unsigned int offset_start;
offset_start = session->pkt.dpo * 7;
- return j1939_session_skb_find_by_offset(session, offset_start);
+ return j1939_session_skb_get_by_offset(session, offset_start);
}
/* see if we are receiver
@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
int ret = 0;
u8 dat[8];
- se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
+ se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
if (!se_skb)
return -ENOBUFS;
@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
netdev_err_once(priv->ndev,
"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
__func__, session, skcb->offset, se_skb->len , session->pkt.tx);
- return -EOVERFLOW;
+ ret = -EOVERFLOW;
+ goto out_free;
}
if (!len) {
@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
if (pkt_done)
j1939_tp_set_rxtimeout(session, 250);
+ out_free:
+ if (ret)
+ kfree_skb(se_skb);
+ else
+ consume_skb(se_skb);
+
return ret;
}
@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
static int j1939_simple_txnext(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
- struct sk_buff *se_skb = j1939_session_skb_find(session);
+ struct sk_buff *se_skb = j1939_session_skb_get(session);
struct sk_buff *skb;
int ret;
@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
return 0;
skb = skb_clone(se_skb, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
can_skb_set_owner(skb, se_skb->sk);
@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
ret = j1939_send_one(priv, skb);
if (ret)
- return ret;
+ goto out_free;
j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
j1939_sk_queue_activate_next(session);
- return 0;
+ out_free:
+ if (ret)
+ kfree_skb(se_skb);
+ else
+ consume_skb(se_skb);
+
+ return ret;
}
static bool j1939_session_deactivate_locked(struct j1939_session *session)
@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
struct sk_buff *skb;
if (!session->transmission) {
- skb = j1939_session_skb_find(session);
+ skb = j1939_session_skb_get(session);
/* distribute among j1939 receivers */
j1939_sk_recv(session->priv, skb);
+ consume_skb(skb);
}
j1939_session_deactivate_activate_next(session);
@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb;
- struct sk_buff *se_skb;
+ struct sk_buff *se_skb = NULL;
const u8 *dat;
u8 *tpdat;
int offset;
@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
goto out_session_cancel;
}
- se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
+ se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
if (!se_skb) {
netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
session);
@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
j1939_tp_set_rxtimeout(session, 250);
}
session->last_cmd = 0xff;
+ consume_skb(se_skb);
j1939_session_put(session);
return;
out_session_cancel:
+ kfree_skb(se_skb);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
j1939_session_put(session);
diff --git a/net/can/raw.c b/net/can/raw.c
index 139d9471ddcf..ac96fc210025 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -83,7 +83,7 @@ struct raw_sock {
struct sock sk;
int bound;
int ifindex;
- struct notifier_block notifier;
+ struct list_head notifier;
int loopback;
int recv_own_msgs;
int fd_frames;
@@ -95,6 +95,10 @@ struct raw_sock {
struct uniqframe __percpu *uniq;
};
+static LIST_HEAD(raw_notifier_list);
+static DEFINE_SPINLOCK(raw_notifier_lock);
+static struct raw_sock *raw_busy_notifier;
+
/* Return pointer to store the extra msg flags for raw_recvmsg().
* We use the space of one unsigned int beyond the 'struct sockaddr_can'
* in skb->cb.
@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
return err;
}
-static int raw_notifier(struct notifier_block *nb,
- unsigned long msg, void *ptr)
+static void raw_notify(struct raw_sock *ro, unsigned long msg,
+ struct net_device *dev)
{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
struct sock *sk = &ro->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
- return NOTIFY_DONE;
-
- if (dev->type != ARPHRD_CAN)
- return NOTIFY_DONE;
+ return;
if (ro->ifindex != dev->ifindex)
- return NOTIFY_DONE;
+ return;
switch (msg) {
case NETDEV_UNREGISTER:
@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
sk->sk_error_report(sk);
break;
}
+}
+
+static int raw_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+ return NOTIFY_DONE;
+ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
+ return NOTIFY_DONE;
+ spin_lock(&raw_notifier_lock);
+ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
+ spin_unlock(&raw_notifier_lock);
+ raw_notify(raw_busy_notifier, msg, dev);
+ spin_lock(&raw_notifier_lock);
+ }
+ raw_busy_notifier = NULL;
+ spin_unlock(&raw_notifier_lock);
return NOTIFY_DONE;
}
@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
return -ENOMEM;
/* set notifier */
- ro->notifier.notifier_call = raw_notifier;
-
- register_netdevice_notifier(&ro->notifier);
+ spin_lock(&raw_notifier_lock);
+ list_add_tail(&ro->notifier, &raw_notifier_list);
+ spin_unlock(&raw_notifier_lock);
return 0;
}
@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
ro = raw_sk(sk);
- unregister_netdevice_notifier(&ro->notifier);
+ spin_lock(&raw_notifier_lock);
+ while (raw_busy_notifier == ro) {
+ spin_unlock(&raw_notifier_lock);
+ schedule_timeout_uninterruptible(1);
+ spin_lock(&raw_notifier_lock);
+ }
+ list_del(&ro->notifier);
+ spin_unlock(&raw_notifier_lock);
lock_sock(sk);
@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
.prot = &raw_proto,
};
+static struct notifier_block canraw_notifier = {
+ .notifier_call = raw_notifier
+};
+
static __init int raw_module_init(void)
{
int err;
@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
err = can_proto_register(&raw_can_proto);
if (err < 0)
pr_err("can: registration of raw protocol failed\n");
+ else
+ register_netdevice_notifier(&canraw_notifier);
return err;
}
@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
static __exit void raw_module_exit(void)
{
can_proto_unregister(&raw_can_proto);
+ unregister_netdevice_notifier(&canraw_notifier);
}
module_init(raw_module_init);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index de407e8feb97..d2b268a1838e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -36,7 +36,7 @@ static int init_protocol(struct ceph_auth_client *ac, int proto)
}
}
-static void set_global_id(struct ceph_auth_client *ac, u64 global_id)
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id)
{
dout("%s global_id %llu\n", __func__, global_id);
@@ -260,19 +260,22 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
ac->negotiating = false;
}
- ret = ac->ops->handle_reply(ac, result, payload, payload_end,
+ if (result) {
+ pr_err("auth protocol '%s' mauth authentication failed: %d\n",
+ ceph_auth_proto_name(ac->protocol), result);
+ ret = result;
+ goto out;
+ }
+
+ ret = ac->ops->handle_reply(ac, global_id, payload, payload_end,
NULL, NULL, NULL, NULL);
if (ret == -EAGAIN) {
ret = build_request(ac, true, reply_buf, reply_len);
goto out;
} else if (ret) {
- pr_err("auth protocol '%s' mauth authentication failed: %d\n",
- ceph_auth_proto_name(ac->protocol), result);
goto out;
}
- set_global_id(ac, global_id);
-
out:
mutex_unlock(&ac->mutex);
return ret;
@@ -498,11 +501,10 @@ int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
int ret;
mutex_lock(&ac->mutex);
- ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len,
+ ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
- if (!ret)
- set_global_id(ac, global_id);
+ WARN_ON(ret == -EAGAIN || ret > 0);
mutex_unlock(&ac->mutex);
return ret;
}
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 70e86e462250..097e9f8d87a7 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -69,7 +69,7 @@ static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
* the generic auth code decode the global_id, and we carry no actual
* authenticate state, so nothing happens here.
*/
-static int handle_reply(struct ceph_auth_client *ac, int result,
+static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len)
@@ -77,7 +77,8 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
struct ceph_auth_none_info *xi = ac->private;
xi->starting = false;
- return result;
+ ceph_auth_set_global_id(ac, global_id);
+ return 0;
}
static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 79641c4afee9..b71b1635916e 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -597,7 +597,7 @@ bad:
return -EINVAL;
}
-static int handle_auth_session_key(struct ceph_auth_client *ac,
+static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
void **p, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
@@ -613,6 +613,7 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
if (ret)
return ret;
+ ceph_auth_set_global_id(ac, global_id);
if (*p == end) {
/* pre-nautilus (or didn't request service tickets!) */
WARN_ON(session_key || con_secret);
@@ -661,7 +662,7 @@ e_inval:
return -EINVAL;
}
-static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
+static int ceph_x_handle_reply(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
@@ -669,13 +670,11 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
struct ceph_x_info *xi = ac->private;
struct ceph_x_ticket_handler *th;
int len = end - buf;
+ int result;
void *p;
int op;
int ret;
- if (result)
- return result; /* XXX hmm? */
-
if (xi->starting) {
/* it's a hello */
struct ceph_x_server_challenge *sc = buf;
@@ -697,9 +696,9 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
switch (op) {
case CEPHX_GET_AUTH_SESSION_KEY:
/* AUTH ticket + [connection secret] + service tickets */
- ret = handle_auth_session_key(ac, &p, end, session_key,
- session_key_len, con_secret,
- con_secret_len);
+ ret = handle_auth_session_key(ac, global_id, &p, end,
+ session_key, session_key_len,
+ con_secret, con_secret_len);
break;
case CEPHX_GET_PRINCIPAL_SESSION_KEY:
diff --git a/net/compat.c b/net/compat.c
index ddd15af3a283..210fc3b4d0d8 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -177,7 +177,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
if (kcmlen > stackbuf_size)
kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
if (kcmsg == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
/* Now copy them over neatly. */
memset(kcmsg, 0, kcmlen);
diff --git a/net/core/dev.c b/net/core/dev.c
index 222b1d322c96..ef8cf7619baf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3853,7 +3853,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (q->flags & TCQ_F_NOLOCK) {
rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
- qdisc_run(q);
+ if (likely(!netif_xmit_frozen_or_stopped(txq)))
+ qdisc_run(q);
if (unlikely(to_free))
kfree_skb_list(to_free);
@@ -5025,25 +5026,43 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
+ rcu_read_lock();
+
while (head) {
struct Qdisc *q = head;
spinlock_t *root_lock = NULL;
head = head->next_sched;
- if (!(q->flags & TCQ_F_NOLOCK)) {
- root_lock = qdisc_lock(q);
- spin_lock(root_lock);
- }
/* We need to make sure head->next_sched is read
* before clearing __QDISC_STATE_SCHED
*/
smp_mb__before_atomic();
+
+ if (!(q->flags & TCQ_F_NOLOCK)) {
+ root_lock = qdisc_lock(q);
+ spin_lock(root_lock);
+ } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state))) {
+ /* There is a synchronize_net() between
+ * STATE_DEACTIVATED flag being set and
+ * qdisc_reset()/some_qdisc_is_busy() in
+ * dev_deactivate(), so we can safely bail out
+ * early here to avoid data race between
+ * qdisc_deactivate() and some_qdisc_is_busy()
+ * for lockless qdisc.
+ */
+ clear_bit(__QDISC_STATE_SCHED, &q->state);
+ continue;
+ }
+
clear_bit(__QDISC_STATE_SCHED, &q->state);
qdisc_run(q);
if (root_lock)
spin_unlock(root_lock);
}
+
+ rcu_read_unlock();
}
xfrm_dev_backlog(sd);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 4eb969518ee0..051432ea4f69 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
attrs->phys.port_number))
return -EMSGSIZE;
@@ -8631,7 +8630,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (!attrs->split)
n = snprintf(name, len, "p%u", attrs->phys.port_number);
else
@@ -8679,6 +8677,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
attrs->pci_sf.sf);
break;
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ return -EOPNOTSUPP;
}
if (n >= len)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index cd80ffed6d26..a9f937975080 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -1168,7 +1168,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
{
struct net *net;
struct sk_buff *skb;
- int err = -ENOBUFS;
+ int err = -ENOMEM;
net = ops->fro_net;
skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
diff --git a/net/core/filter.c b/net/core/filter.c
index cae56d08a670..d81352ca1b5c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -17,6 +17,7 @@
* Kris Katterjohn - Added many additional checks in bpf_check_classic()
*/
+#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -41,7 +42,6 @@
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
-#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
@@ -3784,6 +3784,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
__skb_push(skb, head_room);
memset(skb->data, 0, head_room);
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
}
return ret;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 98f20efbfadf..bf774575ad71 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
write_lock(&n->lock);
if ((n->nud_state == NUD_FAILED) ||
+ (n->nud_state == NUD_NOARP) ||
(tbl->is_multicast &&
tbl->is_multicast(n->primary_key)) ||
time_after(tref, n->updated))
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 43b6ac4c4439..9b5a767eddd5 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -641,6 +641,18 @@ void __put_net(struct net *net)
}
EXPORT_SYMBOL_GPL(__put_net);
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+struct ns_common *get_net_ns(struct ns_common *ns)
+{
+ return &get_net(container_of(ns, struct net, ns))->ns;
+}
+EXPORT_SYMBOL_GPL(get_net_ns);
+
struct net *get_net_ns_by_fd(int fd)
{
struct file *file;
@@ -660,14 +672,8 @@ struct net *get_net_ns_by_fd(int fd)
fput(file);
return net;
}
-
-#else
-struct net *get_net_ns_by_fd(int fd)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
+#endif
struct net *get_net_ns_by_pid(pid_t pid)
{
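The file descriptor that get_net_ns_by_fd() resolves is the one userspace obtains by opening /proc/&lt;pid&gt;/ns/net, i.e. the same fd that setns(2) accepts. For context, a minimal userspace sketch of that side (illustration only; joining another task's namespace requires CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Opening /proc/<pid>/ns/net yields the fd that setns(2) — and, in-kernel,
     * get_net_ns_by_fd() — resolve to a struct net. */
    int fd = open("/proc/self/ns/net", O_RDONLY);

    if (fd < 0 || setns(fd, CLONE_NEWNET) < 0)
        perror("netns");
    else
        puts("joined the network namespace behind the fd");

    close(fd);
    return 0;
}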
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9ec1aa9640ad..3c4c4c7a0402 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
struct page *page,
unsigned int dma_sync_size)
{
+ dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
dma_sync_size = min(dma_sync_size, pool->p.max_len);
- dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+ dma_sync_single_range_for_device(pool->p.dev, dma_addr,
pool->p.offset, dma_sync_size,
pool->p.dma_dir);
}
@@ -195,7 +197,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
if (dma_mapping_error(pool->p.dev, dma))
return false;
- page->dma_addr = dma;
+ page_pool_set_dma_addr(page, dma);
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -331,13 +333,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
*/
goto skip_dma_unmap;
- dma = page->dma_addr;
+ dma = page_pool_get_dma_addr(page);
- /* When page is unmapped, it cannot be returned our pool */
+ /* When page is unmapped, it cannot be returned to our pool */
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC);
- page->dma_addr = 0;
+ page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 714d5fa38546..ec931b080156 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4842,6 +4842,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
if (err < 0)
goto errout;
+ /* Notification info is only filled for bridge ports, not the bridge
+ * device itself. Therefore, a zero notification length is valid and
+ * should not result in an error.
+ */
if (!skb->len)
goto errout;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ad22870298c..bbc3b4b62032 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1253,6 +1253,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
struct sock *sk = skb->sk;
struct sk_buff_head *q;
unsigned long flags;
+ bool is_zerocopy;
u32 lo, hi;
u16 len;
@@ -1267,6 +1268,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
len = uarg->len;
lo = uarg->id;
hi = uarg->id + len - 1;
+ is_zerocopy = uarg->zerocopy;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
@@ -1274,7 +1276,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
serr->ee.ee_data = hi;
serr->ee.ee_info = lo;
- if (!uarg->zerocopy)
+ if (!is_zerocopy)
serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
q = &sk->sk_error_queue;
diff --git a/net/core/sock.c b/net/core/sock.c
index c761c4a0b66b..946888afef88 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
}
EXPORT_SYMBOL(sock_set_rcvbuf);
+static void __sock_set_mark(struct sock *sk, u32 val)
+{
+ if (val != sk->sk_mark) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
+}
+
void sock_set_mark(struct sock *sk, u32 val)
{
lock_sock(sk);
- sk->sk_mark = val;
+ __sock_set_mark(sk, val);
release_sock(sk);
}
EXPORT_SYMBOL(sock_set_mark);
@@ -1126,10 +1134,10 @@ set_sndbuf:
case SO_MARK:
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
- } else if (val != sk->sk_mark) {
- sk->sk_mark = val;
- sk_dst_reset(sk);
+ break;
}
+
+ __sock_set_mark(sk, val);
break;
case SO_RXQ_OVFL:
@@ -2132,10 +2140,10 @@ void skb_orphan_partial(struct sk_buff *skb)
if (skb_is_tcp_pure_ack(skb))
return;
- if (can_skb_orphan_partial(skb))
- skb_set_owner_sk_safe(skb, skb->sk);
- else
- skb_orphan(skb);
+ if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
+ return;
+
+ skb_orphan(skb);
}
EXPORT_SYMBOL(skb_orphan_partial);
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 052a977914a6..63adbc21a735 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -147,8 +147,7 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
struct dsa_switch *ds = cpu_dp->ds;
int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
- int mcount = 0, count;
- unsigned int i;
+ int mcount = 0, count, i;
uint8_t pfx[4];
uint8_t *ndata;
@@ -178,6 +177,8 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
*/
ds->ops->get_strings(ds, port, stringset, ndata);
count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return;
for (i = 0; i < count; i++) {
memmove(ndata + (i * len + sizeof(pfx)),
ndata + i * len, len - sizeof(pfx));
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8c0f3c6ab365..d4756b920108 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -776,13 +776,15 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
struct dsa_switch *ds = dp->ds;
if (sset == ETH_SS_STATS) {
- int count;
+ int count = 0;
- count = 4;
- if (ds->ops->get_sset_count)
- count += ds->ops->get_sset_count(ds, dp->index, sset);
+ if (ds->ops->get_sset_count) {
+ count = ds->ops->get_sset_count(ds, dp->index, sset);
+ if (count < 0)
+ return count;
+ }
- return count;
+ return count + 4;
} else if (sset == ETH_SS_TEST) {
return net_selftest_get_count();
}
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 008c1ec6e20c..122ad5833fb1 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -64,7 +64,7 @@
#define DSA_8021Q_SUBVLAN_HI_SHIFT 9
#define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
#define DSA_8021Q_SUBVLAN_LO_SHIFT 4
-#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(4, 3)
+#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
#define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
#define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
#define DSA_8021Q_SUBVLAN(x) \
diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
index 2a6733a6449a..5d38e90895ac 100644
--- a/net/ethtool/eeprom.c
+++ b/net/ethtool/eeprom.c
@@ -95,7 +95,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
if (dev->sfp_bus)
return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
- if (ops->get_module_info)
+ if (ops->get_module_eeprom_by_page)
return ops->get_module_eeprom_by_page(dev, page_data, extack);
return -EOPNOTSUPP;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 3fa7a394eabf..baa5d10043cb 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
if (eeprom.offset + eeprom.len > total_len)
return -EINVAL;
- data = kmalloc(PAGE_SIZE, GFP_USER);
+ data = kzalloc(PAGE_SIZE, GFP_USER);
if (!data)
return -ENOMEM;
@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
return -EINVAL;
- data = kmalloc(PAGE_SIZE, GFP_USER);
+ data = kzalloc(PAGE_SIZE, GFP_USER);
if (!data)
return -ENOMEM;
@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
return -EFAULT;
test.len = test_len;
- data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
+ data = kcalloc(test_len, sizeof(u64), GFP_USER);
if (!data)
return -ENOMEM;
@@ -2293,7 +2293,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
ret = ethtool_tunable_valid(&tuna);
if (ret)
return ret;
- data = kmalloc(tuna.len, GFP_USER);
+ data = kzalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
ret = ops->get_tunable(dev, &tuna, data);
@@ -2485,7 +2485,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
ret = ethtool_phy_tunable_valid(&tuna);
if (ret)
return ret;
- data = kmalloc(tuna.len, GFP_USER);
+ data = kzalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
if (phy_drv_tunable) {
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index b7642dc96d50..ec07f5765e03 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -119,7 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
*/
memset(&data->phy_stats, 0xff, sizeof(data->phy_stats));
memset(&data->mac_stats, 0xff, sizeof(data->mac_stats));
- memset(&data->ctrl_stats, 0xff, sizeof(data->mac_stats));
+ memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats));
memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats));
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index b3029fff715d..2d51b7ab4dc5 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -353,6 +353,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
int len = 0;
int ret;
+ len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
+
for (i = 0; i < ETH_SS_COUNT; i++) {
const struct strset_info *set_info = &data->sets[i];
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index bfcdc75fc01e..26c32407f029 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -218,6 +218,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (master) {
skb->dev = master->dev;
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
hsr_forward_skb(skb, master);
} else {
atomic_long_inc(&dev->tx_dropped);
@@ -259,6 +260,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
goto out;
skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 6852e9bccf5b..ceb8afb2a62f 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -474,8 +474,8 @@ static void handle_std_frame(struct sk_buff *skb,
}
}
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame)
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
{
struct hsr_port *port = frame->port_rcv;
struct hsr_priv *hsr = port->hsr;
@@ -483,20 +483,26 @@ void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
proto == htons(ETH_P_HSR)) {
+ /* Check if skb contains hsr_ethhdr */
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ return -EINVAL;
+
/* HSR tagged frame :- Data or Supervision */
frame->skb_std = NULL;
frame->skb_prp = NULL;
frame->skb_hsr = skb;
frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
- return;
+ return 0;
}
/* Standard frame or PRP from master port */
handle_std_frame(skb, frame);
+
+ return 0;
}
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame)
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
{
/* Supervision frame */
struct prp_rct *rct = skb_get_PRP_rct(skb);
@@ -507,9 +513,11 @@ void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
frame->skb_std = NULL;
frame->skb_prp = skb;
frame->sequence_nr = prp_get_skb_sequence_nr(rct);
- return;
+ return 0;
}
handle_std_frame(skb, frame);
+
+ return 0;
}
static int fill_frame_info(struct hsr_frame_info *frame,
@@ -519,9 +527,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
struct hsr_vlan_ethhdr *vlan_hdr;
struct ethhdr *ethhdr;
__be16 proto;
+ int ret;
- /* Check if skb contains hsr_ethhdr */
- if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ /* Check if skb contains ethhdr */
+ if (skb->mac_len < sizeof(struct ethhdr))
return -EINVAL;
memset(frame, 0, sizeof(*frame));
@@ -548,7 +557,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
frame->is_from_san = false;
frame->port_rcv = port;
- hsr->proto_ops->fill_frame_info(proto, skb, frame);
+ ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
+ if (ret)
+ return ret;
+
check_local_dest(port->hsr, skb, frame);
return 0;
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
index b6acaafa83fc..206636750b30 100644
--- a/net/hsr/hsr_forward.h
+++ b/net/hsr/hsr_forward.h
@@ -24,8 +24,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port);
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame);
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame);
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
#endif /* __HSR_FORWARD_H */
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 8f264672b70b..53d1f7a82463 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -186,8 +186,8 @@ struct hsr_proto_ops {
struct hsr_port *port);
struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
struct hsr_port *port);
- void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
- struct hsr_frame_info *frame);
+ int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
bool (*invalid_dan_ingress_frame)(__be16 protocol);
void (*update_san_info)(struct hsr_node *node, bool is_sup);
};
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index c5227d42faf5..b70e6bbf6021 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -60,12 +60,11 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
goto finish_pass;
skb_push(skb, ETH_HLEN);
-
- if (skb_mac_header(skb) != skb->data) {
- WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
- __func__, __LINE__, port->dev->name);
- goto finish_consume;
- }
+ skb_reset_mac_header(skb);
+ if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
+ protocol == htons(ETH_P_HSR))
+ skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+ skb_reset_mac_len(skb);
hsr_forward_skb(skb, port);
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 0c1b0770c59e..29bf97640166 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
be32_to_cpu(params.frame_counter)) ||
- ieee802154_llsec_fill_key_id(msg, &params.out_key))
+ ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+ rc = -ENOBUFS;
goto out_free;
+ }
dev_put(dev);
@@ -1184,7 +1186,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
{
struct ieee802154_llsec_device *dpos;
struct ieee802154_llsec_device_key *kpos;
- int rc = 0, idx = 0, idx2;
+ int idx = 0, idx2;
list_for_each_entry(dpos, &data->table->devices, list) {
if (idx++ < data->s_idx)
@@ -1200,7 +1202,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
data->nlmsg_seq,
dpos->hwaddr, kpos,
data->dev)) {
- return rc = -EMSGSIZE;
+ return -EMSGSIZE;
}
data->s_idx2++;
@@ -1209,7 +1211,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
data->s_idx++;
}
- return rc;
+ return 0;
}
int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 2cdc7e63fe17..88215b5c93aa 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
}
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+ rc = -EMSGSIZE;
goto nla_put_failure;
+ }
dev_put(dev);
wpan_phy_put(phy);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 05f6bd89a7dd..0cf2374c143b 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1298,19 +1298,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
return -EINVAL;
- if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
- !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
- !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
- attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+ if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
return -EINVAL;
addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
switch (addr->mode) {
case NL802154_DEV_ADDR_SHORT:
+ if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
+ return -EINVAL;
addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
break;
case NL802154_DEV_ADDR_EXTENDED:
+ if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
+ return -EINVAL;
addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
break;
default:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f17870ee558b..2f94d221c00e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
return err;
}
- if (!inet_sk(sk)->inet_num && inet_autobind(sk))
+ if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->connect(sk, uaddr, addr_len);
}
@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
- if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
+ if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index dff4f0eb96b0..9e41eff4a685 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -185,6 +185,7 @@ BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
+#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
@@ -213,6 +214,7 @@ BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_X86 */
BTF_SET_END(bpf_tcp_ca_kfunc_ids)
static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index bfaf327e9d12..e0480c6cebaa 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
kfree(doi_def->map.std->lvl.local);
kfree(doi_def->map.std->cat.cipso);
kfree(doi_def->map.std->cat.local);
+ kfree(doi_def->map.std);
break;
}
kfree(doi_def);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2e35f68da40a..1c6429c353a9 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
- BUG();
+ return -EINVAL;
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 7b6931a4d775..752e392083e6 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
+ /* if we don't have a source address at this point, fall back to the
+ * dummy address instead of sending out a packet with a source address
+ * of 0.0.0.0
+ */
+ if (!fl4.saddr)
+ fl4.saddr = htonl(INADDR_DUMMY);
+
icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
ende:
ip_rt_put(rt);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 7b272bbed2b4..6b3c558a4f23 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
+ ip_mc_clear_src(i);
ip_ma_put(i);
}
}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index bc2f6ca97152..816d8aad5a68 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -886,7 +886,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
/*
- * Copy BOOTP-supplied string if not already set.
+ * Copy BOOTP-supplied string
*/
static int __init ic_bootp_string(char *dest, char *src, int len, int max)
{
@@ -935,12 +935,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
}
break;
case 12: /* Host name */
- ic_bootp_string(utsname()->nodename, ext+1, *ext,
- __NEW_UTS_LEN);
- ic_host_name_set = 1;
+ if (!ic_host_name_set) {
+ ic_bootp_string(utsname()->nodename, ext+1, *ext,
+ __NEW_UTS_LEN);
+ ic_host_name_set = 1;
+ }
break;
case 15: /* Domain name (DNS) */
- ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
+ if (!ic_domain[0])
+ ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
break;
case 17: /* Root path */
if (!root_server_path[0])
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1c9f71a37258..95a718397fd1 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -954,6 +954,7 @@ bool ping_rcv(struct sk_buff *skb)
struct sock *sk;
struct net *net = dev_net(skb->dev);
struct icmphdr *icmph = icmp_hdr(skb);
+ bool rc = false;
/* We assume the packet has already been checked by icmp_rcv */
@@ -968,14 +969,15 @@ bool ping_rcv(struct sk_buff *skb)
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk);
- if (skb2)
- ping_queue_rcv_skb(sk, skb2);
+ if (skb2 && !ping_queue_rcv_skb(sk, skb2))
+ rc = true;
sock_put(sk);
- return true;
}
- pr_debug("no socket, dropping\n");
- return false;
+ if (!rc)
+ pr_debug("no socket, dropping\n");
+
+ return rc;
}
EXPORT_SYMBOL_GPL(ping_rcv);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f6787c55f6ab..6a36ac98476f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2056,6 +2056,19 @@ martian_source:
return err;
}
+/* get device for dst_alloc with local routes */
+static struct net_device *ip_rt_get_dev(struct net *net,
+ const struct fib_result *res)
+{
+ struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
+ struct net_device *dev = NULL;
+
+ if (nhc)
+ dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
+
+ return dev ? : net->loopback_dev;
+}
+
/*
* NOTE. We drop all the packets that has local source
* addresses, because every properly looped back packet
@@ -2212,7 +2225,7 @@ local_input:
}
}
- rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
flags | RTCF_LOCAL, res->type,
IN_DEV_ORCONF(in_dev, NOPOLICY), false);
if (!rth)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 15f5504adf5b..1307ad0d3b9e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2607,6 +2607,9 @@ void udp_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
bool slow = lock_sock_fast(sk);
+
+ /* protects from races with udp_abort() */
+ sock_set_flag(sk, SOCK_DEAD);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -2857,10 +2860,17 @@ int udp_abort(struct sock *sk, int err)
{
lock_sock(sk);
+ /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
+ * with close()
+ */
+ if (sock_flag(sk, SOCK_DEAD))
+ goto out;
+
sk->sk_err = err;
sk->sk_error_report(sk);
__udp_disconnect(sk, 0);
+out:
release_sock(sk);
return 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b0ef65eb9bd2..701eb82acd1c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5827,7 +5827,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
- BUG();
+ return -EINVAL;
if (tb[IFLA_INET6_TOKEN]) {
err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 0d59efb6b49e..d36ef9d25e73 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1745,10 +1745,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
IPV6_TLV_PADN, 0 };
/* we assume size > sizeof(ra) here */
- /* limit our allocations to order-0 page */
- size = min_t(int, size, SKB_MAX_ORDER(0, 0));
skb = sock_alloc_send_skb(sk, size, 1, &err);
-
if (!skb)
return NULL;
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index e204163c7036..92f3235fa287 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
}
EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
+static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
+{
+ if (likely(next != IPPROTO_ICMPV6))
+ return false;
+
+ if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
+ return false;
+
+ return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
+}
+
void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
- nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
- nft_fib_store_result(dest, priv, nft_in(pkt));
- return;
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+ nft_hook(pkt) == NF_INET_INGRESS) {
+ if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+ nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+ nft_fib_store_result(dest, priv, nft_in(pkt));
+ return;
+ }
}
*dest = 0;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 47a0dc46cbdb..28e44782c94d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -343,7 +343,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- if (!(fhdr->frag_off & htons(0xFFF9))) {
+ if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
/* It is not a fragmented frame */
skb->transport_header += sizeof(struct frag_hdr);
__IP6_INC_STATS(net,
@@ -351,6 +351,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
+ sizeof(struct ipv6hdr);
return 1;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a22822bdbf39..d417e514bd52 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3673,11 +3673,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (nh) {
if (rt->fib6_src.plen) {
NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
- goto out;
+ goto out_free;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
- goto out;
+ goto out_free;
}
rt->nh = nh;
fib6_nh = nexthop_fib6_nh(rt->nh);
@@ -3714,6 +3714,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
out:
fib6_info_release(rt);
return ERR_PTR(err);
+out_free:
+ ip_fib_metrics_put(rt->fib6_metrics);
+ kfree(rt);
+ return ERR_PTR(err);
}
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index aa98294a3ad3..f7c8110ece5f 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -271,6 +271,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (ipip6_tunnel_create(dev) < 0)
goto failed_free;
+ if (!parms->name[0])
+ strcpy(parms->name, dev->name);
+
return nt;
failed_free:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 199b080d418a..3fcd86f4dfdc 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1598,6 +1598,9 @@ void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
+
+ /* protects from races with udp_abort() */
+ sock_set_flag(sk, SOCK_DEAD);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 9245c0421bda..fc34ae2b604c 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -387,10 +387,17 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
+ int ret;
rtnl_lock();
+ wiphy_lock(local->hw.wiphy);
__ieee80211_suspend(&local->hw, NULL);
- __ieee80211_resume(&local->hw);
+ ret = __ieee80211_resume(&local->hw);
+ wiphy_unlock(local->hw.wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
rtnl_unlock();
return count;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8fcbaa1eedf3..648696b49f89 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -50,12 +50,6 @@ struct ieee80211_local;
#define IEEE80211_ENCRYPT_HEADROOM 8
#define IEEE80211_ENCRYPT_TAILROOM 18
-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
- * reception of at least three fragmented frames. This limit can be increased
- * by changing this define, at the cost of slower frame reassembly and
- * increased memory use (about 2 kB of RAM per entry). */
-#define IEEE80211_FRAGMENT_MAX 4
-
/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
@@ -88,18 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
-struct ieee80211_fragment_entry {
- struct sk_buff_head skb_list;
- unsigned long first_frag_time;
- u16 seq;
- u16 extra_len;
- u16 last_frag;
- u8 rx_queue;
- bool check_sequential_pn; /* needed for CCMP/GCMP */
- u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
-};
-
-
struct ieee80211_bss {
u32 device_ts_beacon, device_ts_presp;
@@ -241,8 +223,15 @@ struct ieee80211_rx_data {
*/
int security_idx;
- u32 tkip_iv32;
- u16 tkip_iv16;
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ } ccm_gcm;
+ };
};
struct ieee80211_csa_settings {
@@ -902,9 +891,7 @@ struct ieee80211_sub_if_data {
char name[IFNAMSIZ];
- /* Fragment table for host-based reassembly */
- struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
- unsigned int fragment_next;
+ struct ieee80211_fragment_cache frags;
/* TID bitmap for NoAck policy */
u16 noack_map;
@@ -1455,7 +1442,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON_ONCE(!chanctx_conf)) {
+ if (!chanctx_conf) {
rcu_read_unlock();
return NULL;
}
@@ -2320,4 +2307,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
#define debug_noinline
#endif
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
+
#endif /* IEEE80211_I_H */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7032a2b59249..137fa4c50e07 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -476,14 +476,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
GFP_KERNEL);
}
- /* APs need special treatment */
if (sdata->vif.type == NL80211_IFTYPE_AP) {
- struct ieee80211_sub_if_data *vlan, *tmpsdata;
-
- /* down all dependent devices, that is VLANs */
- list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
- u.vlan.list)
- dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
} else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
/* remove all packets in parent bc_buf pointing to this dev */
@@ -641,6 +634,15 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ /* close all dependent VLAN interfaces before locking wiphy */
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ struct ieee80211_sub_if_data *vlan, *tmpsdata;
+
+ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+ u.vlan.list)
+ dev_close(vlan->dev);
+ }
+
wiphy_lock(sdata->local->hw.wiphy);
ieee80211_do_stop(sdata, true);
wiphy_unlock(sdata->local->hw.wiphy);
@@ -677,16 +679,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
*/
static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
- int i;
-
/* free extra data */
ieee80211_free_keys(sdata, false);
ieee80211_debugfs_remove_netdev(sdata);
- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
- __skb_queue_purge(&sdata->fragments[i].skb_list);
- sdata->fragment_next = 0;
+ ieee80211_destroy_frag_cache(&sdata->frags);
if (ieee80211_vif_is_mesh(&sdata->vif))
ieee80211_mesh_teardown_sdata(sdata);
@@ -1595,6 +1593,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
+ if (!list_empty(&sdata->u.ap.vlans))
+ return -EBUSY;
+ break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_OCB:
@@ -1930,8 +1931,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
- skb_queue_head_init(&sdata->fragments[i].skb_list);
+ ieee80211_init_frag_cache(&sdata->frags);
INIT_LIST_HEAD(&sdata->key_list);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 56c068cb49c4..f695fc80088b 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -799,6 +799,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
+ static atomic_t key_color = ATOMIC_INIT(0);
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -850,6 +851,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
key->sdata = sdata;
key->sta = sta;
+ /*
+ * Assign a unique ID to every key so we can easily prevent mixed
+ * key and fragment cache attacks.
+ */
+ key->color = atomic_inc_return(&key_color);
+
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 7ad72e9b4991..1e326c89d721 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -128,6 +128,8 @@ struct ieee80211_key {
} debugfs;
#endif
+ unsigned int color;
+
/*
* key config, must be last because it contains key
* material as variable length member
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 62145e5f9628..f33a3acd7f96 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -252,6 +252,7 @@ static void ieee80211_restart_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, restart_work);
struct ieee80211_sub_if_data *sdata;
+ int ret;
/* wait for scan work complete */
flush_workqueue(local->workqueue);
@@ -301,8 +302,12 @@ static void ieee80211_restart_work(struct work_struct *work)
/* wait for all packet processing to be done */
synchronize_net();
- ieee80211_reconfig(local);
+ ret = ieee80211_reconfig(local);
wiphy_unlock(local->hw.wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
rtnl_unlock();
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2480bd0577bb..3f2aad2e7436 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4062,10 +4062,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (elems.mbssid_config_ie)
bss_conf->profile_periodicity =
elems.mbssid_config_ie->profile_periodicity;
+ else
+ bss_conf->profile_periodicity = 0;
if (elems.ext_capab_len >= 11 &&
(elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
bss_conf->ema_ap = true;
+ else
+ bss_conf->ema_ap = false;
/* continue assoc process */
ifmgd->assoc_data->timeout = jiffies;
@@ -5802,12 +5806,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 3)
sdata->vif.bss_conf.profile_periodicity = elem->data[2];
+ else
+ sdata->vif.bss_conf.profile_periodicity = 0;
elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
beacon_ies->data, beacon_ies->len);
if (elem && elem->datalen >= 11 &&
(elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
sdata->vif.bss_conf.ema_ap = true;
+ else
+ sdata->vif.bss_conf.ema_ap = false;
} else {
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 6487b05da6fa..a6f3fb4a9197 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1514,7 +1514,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
return;
- if (time_is_before_jiffies(mi->sample_time))
+ if (time_is_after_jiffies(mi->sample_time))
return;
mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 62047e93e217..af0ef456eb0f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -2123,19 +2123,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return result;
}
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+ skb_queue_head_init(&cache->entries[i].skb_list);
+}
+
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+ __skb_queue_purge(&cache->entries[i].skb_list);
+}
+
static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq, int rx_queue,
struct sk_buff **skb)
{
struct ieee80211_fragment_entry *entry;
- entry = &sdata->fragments[sdata->fragment_next++];
- if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
- sdata->fragment_next = 0;
+ entry = &cache->entries[cache->next++];
+ if (cache->next >= IEEE80211_FRAGMENT_MAX)
+ cache->next = 0;
- if (!skb_queue_empty(&entry->skb_list))
- __skb_queue_purge(&entry->skb_list);
+ __skb_queue_purge(&entry->skb_list);
__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
*skb = NULL;
@@ -2150,14 +2165,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
}
static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
unsigned int frag, unsigned int seq,
int rx_queue, struct ieee80211_hdr *hdr)
{
struct ieee80211_fragment_entry *entry;
int i, idx;
- idx = sdata->fragment_next;
+ idx = cache->next;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
struct ieee80211_hdr *f_hdr;
struct sk_buff *f_skb;
@@ -2166,7 +2181,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
if (idx < 0)
idx = IEEE80211_FRAGMENT_MAX - 1;
- entry = &sdata->fragments[idx];
+ entry = &cache->entries[idx];
if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
entry->rx_queue != rx_queue ||
entry->last_frag + 1 != frag)
@@ -2194,15 +2209,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
return NULL;
}
+static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
+{
+ return rx->key &&
+ (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+ ieee80211_has_protected(fc);
+}
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
+ struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
struct ieee80211_hdr *hdr;
u16 sc;
__le16 fc;
unsigned int frag, seq;
struct ieee80211_fragment_entry *entry;
struct sk_buff *skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
@@ -2213,14 +2240,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (is_multicast_ether_addr(hdr->addr1)) {
- I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
- goto out_no_led;
- }
+ if (rx->sta)
+ cache = &rx->sta->frags;
if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
goto out;
+ if (is_multicast_ether_addr(hdr->addr1))
+ return RX_DROP_MONITOR;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -2236,20 +2264,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (frag == 0) {
/* This is the first fragment of a new frame. */
- entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
+ entry = ieee80211_reassemble_add(cache, frag, seq,
rx->seqno_idx, &(rx->skb));
- if (rx->key &&
- (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
- ieee80211_has_protected(fc)) {
+ if (requires_sequential_pn(rx, fc)) {
int queue = rx->security_idx;
/* Store CCMP/GCMP PN so that we can verify that the
* next fragment has a sequential PN value.
*/
entry->check_sequential_pn = true;
+ entry->is_protected = true;
+ entry->key_color = rx->key->color;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
@@ -2261,6 +2286,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sizeof(rx->key->u.gcmp.rx_pn[queue]));
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
IEEE80211_GCMP_PN_LEN);
+ } else if (rx->key &&
+ (ieee80211_has_protected(fc) ||
+ (status->flag & RX_FLAG_DECRYPTED))) {
+ entry->is_protected = true;
+ entry->key_color = rx->key->color;
}
return RX_QUEUED;
}
@@ -2268,7 +2298,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is a fragment for a frame that should already be pending in
* fragment cache. Add this fragment to the end of the pending entry.
*/
- entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
+ entry = ieee80211_reassemble_find(cache, frag, seq,
rx->seqno_idx, hdr);
if (!entry) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2283,25 +2313,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (entry->check_sequential_pn) {
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
- int queue;
- if (!rx->key ||
- (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
+ if (!requires_sequential_pn(rx, fc))
+ return RX_DROP_UNUSABLE;
+
+ /* Prevent mixed key and fragment cache attacks */
+ if (entry->key_color != rx->key->color)
return RX_DROP_UNUSABLE;
+
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
pn[i]++;
if (pn[i])
break;
}
- queue = rx->security_idx;
- rpn = rx->key->u.ccmp.rx_pn[queue];
+
+ rpn = rx->ccm_gcm.pn;
if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
+ } else if (entry->is_protected &&
+ (!rx->key ||
+ (!ieee80211_has_protected(fc) &&
+ !(status->flag & RX_FLAG_DECRYPTED)) ||
+ rx->key->color != entry->key_color)) {
+ /* Drop this as a mixed key or fragment cache attack, even
+ * if for TKIP Michael MIC should protect us, and WEP is a
+ * lost cause anyway.
+ */
+ return RX_DROP_UNUSABLE;
+ } else if (entry->is_protected && rx->key &&
+ entry->key_color != rx->key->color &&
+ (status->flag & RX_FLAG_DECRYPTED)) {
+ return RX_DROP_UNUSABLE;
}
skb_pull(rx->skb, ieee80211_hdrlen(fc));
@@ -2330,7 +2374,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
out:
ieee80211_led_rx(rx->local);
- out_no_led:
if (rx->sta)
rx->sta->rx_stats.packets++;
return RX_CONTINUE;
@@ -2494,13 +2537,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
/*
- * Allow EAPOL frames to us/the PAE group address regardless
- * of whether the frame was encrypted or not.
+ * Allow EAPOL frames to us/the PAE group address regardless of
+ * whether the frame was encrypted or not, and always disallow
+ * all other destination addresses for them.
*/
- if (ehdr->h_proto == rx->sdata->control_port_protocol &&
- (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
- ether_addr_equal(ehdr->h_dest, pae_group_addr)))
- return true;
+ if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
+ return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+ ether_addr_equal(ehdr->h_dest, pae_group_addr);
if (ieee80211_802_1x_port_control(rx) ||
ieee80211_drop_unencrypted(rx, fc))
@@ -2525,8 +2568,28 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
cfg80211_rx_control_port(dev, skb, noencrypt);
dev_kfree_skb(skb);
} else {
+ struct ethhdr *ehdr = (void *)skb_mac_header(skb);
+
memset(skb->cb, 0, sizeof(skb->cb));
+ /*
+ * 802.1X over 802.11 requires that the authenticator address
+ * be used for EAPOL frames. However, 802.1X allows the use of
+ * the PAE group address instead. If the interface is part of
+ * a bridge and we pass the frame with the PAE group address,
+ * then the bridge will forward it to the network (even if the
+ * client was not associated yet), which isn't supposed to
+ * happen.
+ * To avoid that, rewrite the destination address to our own
+ * address, so that the authenticator (e.g. hostapd) will see
+ * the frame, but bridge won't forward it anywhere else. Note
+ * that due to earlier filtering, the only other address can
+ * be the PAE group address.
+ */
+ if (unlikely(skb->protocol == sdata->control_port_protocol &&
+ !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
+ ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
+
/* deliver to local stack */
if (rx->list)
list_add_tail(&skb->list, rx->list);
@@ -2566,6 +2629,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
+ ehdr->h_proto != rx->sdata->control_port_protocol &&
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest) &&
ieee80211_vif_get_num_mcast_if(sdata) != 0) {
@@ -2675,7 +2739,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
rx->sdata->vif.addr,
rx->sdata->vif.type,
- data_offset))
+ data_offset, true))
return RX_DROP_UNUSABLE;
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
@@ -2732,6 +2796,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
+ if (rx->key) {
+ /*
+ * We should not receive A-MSDUs on pre-HT connections,
+ * and HT connections cannot use old ciphers. Thus drop
+ * them, as in those cases we couldn't even have SPP
+ * A-MSDUs or such.
+ */
+ switch (rx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ return RX_DROP_UNUSABLE;
+ default:
+ break;
+ }
+ }
+
return __ieee80211_rx_h_amsdu(rx, 0);
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d4cc9ac2d703..6b50cb5e0e3c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
+ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+
+ if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+ !ieee80211_is_beacon(mgmt->frame_control) &&
+ !ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return;
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
- if (skb->len < 15)
- return;
- } else if (skb->len < 24 ||
- (!ieee80211_is_probe_resp(mgmt->frame_control) &&
- !ieee80211_is_beacon(mgmt->frame_control)))
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon);
+ }
+
+ if (skb->len < min_hdr_len)
return;
sdata1 = rcu_dereference(local->scan_sdata);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ec6973ee88ef..f2fb69da9b6e 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
@@ -392,6 +392,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
u64_stats_init(&sta->rx_stats.syncp);
+ ieee80211_init_frag_cache(&sta->frags);
+
sta->sta_state = IEEE80211_STA_NONE;
/* Mark TID as unreserved */
@@ -1102,6 +1104,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
ieee80211_sta_debugfs_remove(sta);
+ ieee80211_destroy_frag_cache(&sta->frags);
+
cleanup_single_sta(sta);
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 78b9d0c7cc58..0333072ebd98 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -439,6 +439,34 @@ struct ieee80211_sta_rx_stats {
};
/*
+ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
+ * reception of at least one MSDU per access category per associated STA"
+ * on APs, or "at least one MSDU per access category" on other interface types.
+ *
+ * This limit can be increased by changing this define, at the cost of slower
+ * frame reassembly and increased memory use while fragments are pending.
+ */
+#define IEEE80211_FRAGMENT_MAX 4
+
+struct ieee80211_fragment_entry {
+ struct sk_buff_head skb_list;
+ unsigned long first_frag_time;
+ u16 seq;
+ u16 extra_len;
+ u16 last_frag;
+ u8 rx_queue;
+ u8 check_sequential_pn:1, /* needed for CCMP/GCMP */
+ is_protected:1;
+ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+ unsigned int key_color;
+};
+
+struct ieee80211_fragment_cache {
+ struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
+ unsigned int next;
+};
+
+/*
* The bandwidth threshold below which the per-station CoDel parameters will be
* scaled to be more lenient (to prevent starvation of slow stations). This
* value will be scaled by the number of active stations when it is being
@@ -531,6 +559,7 @@ struct ieee80211_sta_rx_stats {
* @status_stats.last_ack_signal: last ACK signal
* @status_stats.ack_signal_filled: last ACK signal validity
* @status_stats.avg_ack_signal: average ACK signal
+ * @frags: fragment cache
*/
struct sta_info {
/* General information, mostly static */
@@ -639,6 +668,8 @@ struct sta_info {
struct cfg80211_chan_def tdls_chandef;
+ struct ieee80211_fragment_cache frags;
+
/* keep last! */
struct ieee80211_sta sta;
};
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0b719f3d2dec..2651498d05e8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2014,6 +2014,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
ieee80211_tx(sdata, sta, skb, false);
}
+static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
+{
+ struct ieee80211_radiotap_header *rthdr =
+ (struct ieee80211_radiotap_header *)skb->data;
+
+ /* check for not even having the fixed radiotap header part */
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ return false; /* too short to be possibly valid */
+
+ /* is it a header version we can trust to find length from? */
+ if (unlikely(rthdr->it_version))
+ return false; /* only version 0 is supported */
+
+ /* does the skb contain enough to deliver on the alleged length? */
+ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+ return false; /* skb too short for claimed rt header extent */
+
+ return true;
+}
+
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2022,8 +2042,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[info->band];
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
NULL);
u16 txflags;
@@ -2036,17 +2054,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
u8 vht_mcs = 0, vht_nss = 0;
int i;
- /* check for not even having the fixed radiotap header part */
- if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
- return false; /* too short to be possibly valid */
-
- /* is it a header version we can trust to find length from? */
- if (unlikely(rthdr->it_version))
- return false; /* only version 0 is supported */
-
- /* does the skb contain enough to deliver on the alleged length? */
- if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
- return false; /* skb too short for claimed rt header extent */
+ if (!ieee80211_validate_radiotap_len(skb))
+ return false;
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_DONTFRAG;
@@ -2186,6 +2195,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
return false;
if (rate_found) {
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[info->band];
+
info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -2199,7 +2211,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(info->control.rates, vht_mcs,
vht_nss);
- } else {
+ } else if (sband) {
for (i = 0; i < sband->n_bitrates; i++) {
if (rate * 5 != sband->bitrates[i].bitrate)
continue;
@@ -2236,8 +2248,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_INJECTED;
- /* Sanity-check and process the injection radiotap header */
- if (!ieee80211_parse_tx_radiotap(skb, dev))
+ /* Sanity-check the length of the radiotap header */
+ if (!ieee80211_validate_radiotap_len(skb))
goto fail;
/* we now know there is a radiotap header with a length we can use */
@@ -2351,6 +2363,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
ieee80211_select_queue_80211(sdata, skb, hdr);
skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
+ /*
+ * Process the radiotap header. This will now take into account the
+ * selected chandef above to accurately set injection rates and
+ * retransmissions.
+ */
+ if (!ieee80211_parse_tx_radiotap(skb, dev))
+ goto fail_rcu;
+
/* remove the injection radiotap header */
skb_pull(skb, len_rthdr);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0a0481f5af48..060059ef9668 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -947,7 +947,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
switch (elem->data[0]) {
case WLAN_EID_EXT_HE_MU_EDCA:
- if (len == sizeof(*elems->mu_edca_param_set)) {
+ if (len >= sizeof(*elems->mu_edca_param_set)) {
elems->mu_edca_param_set = data;
if (crc)
*crc = crc32_be(*crc, (void *)elem,
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
}
break;
case WLAN_EID_EXT_UORA:
- if (len == 1)
+ if (len >= 1)
elems->uora_element = data;
break;
case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
@@ -976,7 +976,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
elems->max_channel_switch_time = data;
break;
case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len == sizeof(*elems->mbssid_config_ie))
+ if (len >= sizeof(*elems->mbssid_config_ie))
elems->mbssid_config_ie = data;
break;
case WLAN_EID_EXT_HE_SPR:
@@ -985,7 +985,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
elems->he_spr = data;
break;
case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len == sizeof(*elems->he_6ghz_capa))
+ if (len >= sizeof(*elems->he_6ghz_capa))
elems->he_6ghz_capa = data;
break;
}
@@ -1074,14 +1074,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
switch (id) {
case WLAN_EID_LINK_ID:
- if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
elem_parse_failed = true;
break;
}
elems->lnk_id = (void *)(pos - 2);
break;
case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
elem_parse_failed = true;
break;
}
@@ -1244,7 +1244,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->sec_chan_offs = (void *)pos;
break;
case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen !=
+ if (elen <
sizeof(*elems->mesh_chansw_params_ie)) {
elem_parse_failed = true;
break;
@@ -1253,7 +1253,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
break;
case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
if (!action ||
- elen != sizeof(*elems->wide_bw_chansw_ie)) {
+ elen < sizeof(*elems->wide_bw_chansw_ie)) {
elem_parse_failed = true;
break;
}
@@ -1272,7 +1272,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
pos, elen);
if (ie) {
- if (ie[1] == sizeof(*elems->wide_bw_chansw_ie))
+ if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
elems->wide_bw_chansw_ie =
(void *)(ie + 2);
else
@@ -1316,7 +1316,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elems->cisco_dtpc_elem = pos;
break;
case WLAN_EID_ADDBA_EXT:
- if (elen != sizeof(struct ieee80211_addba_ext_ie)) {
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
elem_parse_failed = true;
break;
}
@@ -1342,7 +1342,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elem, elems);
break;
case WLAN_EID_S1G_CAPABILITIES:
- if (elen == sizeof(*elems->s1g_capab))
+ if (elen >= sizeof(*elems->s1g_capab))
elems->s1g_capab = (void *)pos;
else
elem_parse_failed = true;
@@ -2178,8 +2178,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
list_for_each_entry(ctx, &local->chanctx_list, list)
ctx->driver_present = false;
mutex_unlock(&local->chanctx_mtx);
-
- cfg80211_shutdown_all_interfaces(local->hw.wiphy);
}
static void ieee80211_assign_chanctx(struct ieee80211_local *local,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 91bf32af55e9..bca47fad5a16 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -3,6 +3,7 @@
* Copyright 2002-2004, Instant802 Networks, Inc.
* Copyright 2008, Jouni Malinen <j@w1.fi>
* Copyright (C) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#include <linux/netdevice.h>
@@ -167,8 +168,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
update_iv:
/* update IV in key information to be able to detect replays */
- rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
- rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
+ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
+ rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
return RX_CONTINUE;
@@ -294,8 +295,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
hdr->addr1, hwaccel, rx->security_idx,
- &rx->tkip_iv32,
- &rx->tkip_iv16);
+ &rx->tkip.iv32,
+ &rx->tkip.iv16);
if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
@@ -553,6 +554,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
}
memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+ if (unlikely(ieee80211_is_frag(hdr)))
+ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove CCMP header and MIC */
@@ -781,6 +784,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
}
memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+ if (unlikely(ieee80211_is_frag(hdr)))
+ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
}
/* Remove GCMP header and MIC */
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 99fc21406168..9b263f27ce9b 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -130,7 +130,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
pr_debug("MP_JOIN hmac");
} else {
- pr_warn("MP_JOIN bad option size");
mp_opt->mp_join = 0;
}
break;
@@ -357,6 +356,8 @@ void mptcp_get_options(const struct sk_buff *skb,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
@@ -1024,7 +1025,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
} else {
mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
- mptcp_pm_del_add_timer(msk, &mp_opt.addr);
+ mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 6ba040897738..2469e06a3a9d 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -346,18 +346,18 @@ out:
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr)
+ struct mptcp_addr_info *addr, bool check_id)
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
- if (entry)
+ if (entry && (!check_id || entry->addr.id == addr->id))
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
spin_unlock_bh(&msk->pm.lock);
- if (entry)
+ if (entry && (!check_id || entry->addr.id == addr->id))
sk_stop_timer_sync(sk, &entry->add_timer);
return entry;
@@ -1064,7 +1064,7 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
- entry = mptcp_pm_del_add_timer(msk, addr);
+ entry = mptcp_pm_del_add_timer(msk, addr, false);
if (entry) {
list_del(&entry->list);
kfree(entry);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 29a2d690d8d5..632350018fb6 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -280,11 +280,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
/* try to fetch required memory from subflow */
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- if (ssk->sk_forward_alloc < skb->truesize)
- goto drop;
- __sk_mem_reclaim(ssk, skb->truesize);
- if (!sk_rmem_schedule(sk, skb, skb->truesize))
+ int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
+
+ if (ssk->sk_forward_alloc < amount)
goto drop;
+
+ ssk->sk_forward_alloc -= amount;
+ sk->sk_forward_alloc += amount;
}
/* the skb map_seq accounts for the skb offset:
@@ -668,18 +670,22 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
/* In most cases we will be able to lock the mptcp socket. If its already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
struct sock *sk = (struct sock *)msk;
unsigned int moved = 0;
if (inet_sk_state_load(sk) == TCP_CLOSE)
- return;
-
- mptcp_data_lock(sk);
+ return false;
__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
__mptcp_ofo_queue(msk);
+ if (unlikely(ssk->sk_err)) {
+ if (!sock_owned_by_user(sk))
+ __mptcp_error_report(sk);
+ else
+ set_bit(MPTCP_ERROR_REPORT, &msk->flags);
+ }
/* If the moves have caught up with the DATA_FIN sequence number
* it's time to ack the DATA_FIN and change socket state, but
@@ -688,7 +694,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
*/
if (mptcp_pending_data_fin(sk, NULL))
mptcp_schedule_work(sk);
- mptcp_data_unlock(sk);
+ return moved > 0;
}
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
@@ -696,7 +702,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
int sk_rbuf, ssk_rbuf;
- bool wake;
/* The peer can send data while we are shutting down this
* subflow at msk destruction time, but we must avoid enqueuing
@@ -705,28 +710,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
if (unlikely(subflow->disposable))
return;
- /* move_skbs_to_msk below can legitly clear the data_avail flag,
- * but we will need later to properly woke the reader, cache its
- * value
- */
- wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
- if (wake)
- set_bit(MPTCP_DATA_READY, &msk->flags);
-
ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
if (unlikely(ssk_rbuf > sk_rbuf))
sk_rbuf = ssk_rbuf;
- /* over limit? can't append more skbs to msk */
+ /* over limit? can't append more skbs to msk. Also, no need to wake up */
if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
- goto wake;
-
- move_skbs_to_msk(msk, ssk);
+ return;
-wake:
- if (wake)
+ /* Wake-up the reader only for in-sequence data */
+ mptcp_data_lock(sk);
+ if (move_skbs_to_msk(msk, ssk)) {
+ set_bit(MPTCP_DATA_READY, &msk->flags);
sk->sk_data_ready(sk);
+ }
+ mptcp_data_unlock(sk);
}
static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
@@ -858,7 +857,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
sock_owned_by_me(sk);
mptcp_for_each_subflow(msk, subflow) {
- if (subflow->data_avail)
+ if (READ_ONCE(subflow->data_avail))
return mptcp_subflow_tcp_sock(subflow);
}
@@ -879,12 +878,18 @@ static bool mptcp_skb_can_collapse_to(u64 write_seq,
!mpext->frozen;
}
+/* we can append data to the given data frag if:
+ * - there is space available in the backing page_frag
+ * - the data frag tail matches the current page_frag free offset
+ * - the data frag end sequence number matches the current write seq
+ */
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
const struct page_frag *pfrag,
const struct mptcp_data_frag *df)
{
return df && pfrag->page == df->page &&
pfrag->size - pfrag->offset > 0 &&
+ pfrag->offset == (df->offset + df->data_len) &&
df->data_seq + df->data_len == msk->write_seq;
}
@@ -941,6 +946,10 @@ static void __mptcp_update_wmem(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
+
if (!msk->wmem_reserved)
return;
@@ -1079,10 +1088,20 @@ out:
static void __mptcp_clean_una_wakeup(struct sock *sk)
{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
__mptcp_clean_una(sk);
mptcp_write_space(sk);
}
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+ mptcp_data_lock(sk);
+ __mptcp_clean_una_wakeup(sk);
+ mptcp_data_unlock(sk);
+}
+
static void mptcp_enter_memory_pressure(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -1935,6 +1954,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
mptcp_data_unlock(sk);
tcp_cleanup_rbuf(ssk, moved);
+
+ if (unlikely(ssk->sk_err))
+ __mptcp_error_report(sk);
unlock_sock_fast(ssk, slowpath);
} while (!done);
@@ -2293,7 +2315,7 @@ static void __mptcp_retrans(struct sock *sk)
struct sock *ssk;
int ret;
- __mptcp_clean_una_wakeup(sk);
+ mptcp_clean_una_wakeup(sk);
dfrag = mptcp_rtx_head(sk);
if (!dfrag) {
if (mptcp_data_fin_enabled(msk)) {
@@ -2418,13 +2440,12 @@ static int __mptcp_init_sock(struct sock *sk)
timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
- tcp_assign_congestion_control(sk);
-
return 0;
}
static int mptcp_init_sock(struct sock *sk)
{
+ struct inet_connection_sock *icsk = inet_csk(sk);
struct net *net = sock_net(sk);
int ret;
@@ -2442,6 +2463,16 @@ static int mptcp_init_sock(struct sock *sk)
if (ret)
return ret;
+ /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
+ * propagate the correct value
+ */
+ tcp_assign_congestion_control(sk);
+ strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+ /* no need to keep a reference to the ops, the name will suffice */
+ tcp_cleanup_congestion_control(sk);
+ icsk->icsk_ca_ops = NULL;
+
sk_sockets_allocated_inc(sk);
sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
@@ -2616,7 +2647,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
- tcp_cleanup_congestion_control(sk);
sk_refcnt_debug_release(sk);
mptcp_dispose_initial_subflow(msk);
sock_put(sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index edc0128730df..385796f0ef19 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -258,6 +258,7 @@ struct mptcp_sock {
} rcvq_space;
u32 setsockopt_seq;
+ char ca_name[TCP_CA_NAME_MAX];
};
#define mptcp_lock_sock(___sk, cb) do { \
@@ -361,7 +362,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
enum mptcp_data_avail {
MPTCP_SUBFLOW_NODATA,
MPTCP_SUBFLOW_DATA_AVAIL,
- MPTCP_SUBFLOW_OOO_DATA
};
struct mptcp_delegated_action {
@@ -671,7 +671,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr);
+ struct mptcp_addr_info *addr, bool check_id);
struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
struct mptcp_addr_info *addr);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 00d941b66c1e..a79798189599 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -547,7 +547,7 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
}
if (ret == 0)
- tcp_set_congestion_control(sk, name, false, cap_net_admin);
+ strcpy(msk->ca_name, name);
release_sock(sk);
return ret;
@@ -705,7 +705,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));
if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
- tcp_set_congestion_control(ssk, inet_csk(sk)->icsk_ca_ops->name, false, true);
+ tcp_set_congestion_control(ssk, msk->ca_name, false, true);
}
static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index a5ede357cfbc..be1de4084196 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -630,21 +630,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
/* if the sk is MP_CAPABLE, we try to fetch the client key */
if (subflow_req->mp_capable) {
- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
- /* here we can receive and accept an in-window,
- * out-of-order pkt, which will not carry the MP_CAPABLE
- * opt even on mptcp enabled paths
- */
- goto create_msk;
- }
-
+ /* we can receive and accept an in-window, out-of-order pkt,
+ * which may not carry the MP_CAPABLE opt even on mptcp enabled
+ * paths: always try to extract the peer key, and fallback
+ * for packets missing it.
+ * Even OoO DSS packets coming legitimately after dropped or
+ * reordered MPC will cause fallback, but we don't have other
+ * options.
+ */
mptcp_get_options(skb, &mp_opt);
if (!mp_opt.mp_capable) {
fallback = true;
goto create_child;
}
-create_msk:
new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
if (!new_msk)
fallback = true;
@@ -785,10 +784,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
}
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
- WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
- ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+ ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
@@ -813,13 +812,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
/* Mapping covers data later in the subflow stream,
* currently unsupported.
*/
- warn_bad_map(subflow, ssn);
+ dbg_bad_map(subflow, ssn);
return false;
}
if (unlikely(!before(ssn, subflow->map_subflow_seq +
subflow->map_data_len))) {
/* Mapping does covers past subflow data, invalid */
- warn_bad_map(subflow, ssn + skb->len);
+ dbg_bad_map(subflow, ssn);
return false;
}
return true;
@@ -867,7 +866,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len = mpext->data_len;
if (data_len == 0) {
- pr_err("Infinite mapping not handled");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
return MAPPING_INVALID;
}
@@ -1002,7 +1000,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
struct sk_buff *skb;
if (!skb_peek(&ssk->sk_receive_queue))
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
if (subflow->data_avail)
return true;
@@ -1013,21 +1011,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
status = get_mapping_status(ssk, msk);
trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
- if (status == MAPPING_INVALID) {
- ssk->sk_err = EBADMSG;
- goto fatal;
- }
- if (status == MAPPING_DUMMY) {
- __mptcp_do_fallback(msk);
- skb = skb_peek(&ssk->sk_receive_queue);
- subflow->map_valid = 1;
- subflow->map_seq = READ_ONCE(msk->ack_seq);
- subflow->map_data_len = skb->len;
- subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
- subflow->ssn_offset;
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
- return true;
- }
+ if (unlikely(status == MAPPING_INVALID))
+ goto fallback;
+
+ if (unlikely(status == MAPPING_DUMMY))
+ goto fallback;
if (status != MAPPING_OK)
goto no_data;
@@ -1040,10 +1028,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
* MP_CAPABLE-based mapping
*/
if (unlikely(!READ_ONCE(msk->can_ack))) {
- if (!subflow->mpc_map) {
- ssk->sk_err = EBADMSG;
- goto fatal;
- }
+ if (!subflow->mpc_map)
+ goto fallback;
WRITE_ONCE(msk->remote_key, subflow->remote_key);
WRITE_ONCE(msk->ack_seq, subflow->map_seq);
WRITE_ONCE(msk->can_ack, true);
@@ -1053,35 +1039,43 @@ static bool subflow_check_data_avail(struct sock *ssk)
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
ack_seq);
- if (ack_seq == old_ack) {
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
- break;
- } else if (after64(ack_seq, old_ack)) {
- subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
- break;
+ if (unlikely(before64(ack_seq, old_ack))) {
+ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+ continue;
}
- /* only accept in-sequence mapping. Old values are spurious
- * retransmission
- */
- mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ break;
}
return true;
no_data:
subflow_sched_work_if_closed(msk, ssk);
return false;
-fatal:
- /* fatal protocol error, close the socket */
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
- ssk->sk_error_report(ssk);
- tcp_set_state(ssk, TCP_CLOSE);
- subflow->reset_transient = 0;
- subflow->reset_reason = MPTCP_RST_EMPTCP;
- tcp_send_active_reset(ssk, GFP_ATOMIC);
- subflow->data_avail = 0;
- return false;
+
+fallback:
+ /* RFC 8684 section 3.7. */
+ if (subflow->mp_join || subflow->fully_established) {
+ /* fatal protocol error, close the socket.
+ * subflow_error_report() will introduce the appropriate barriers
+ */
+ ssk->sk_err = EBADMSG;
+ tcp_set_state(ssk, TCP_CLOSE);
+ subflow->reset_transient = 0;
+ subflow->reset_reason = MPTCP_RST_EMPTCP;
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ WRITE_ONCE(subflow->data_avail, 0);
+ return false;
+ }
+
+ __mptcp_do_fallback(msk);
+ skb = skb_peek(&ssk->sk_receive_queue);
+ subflow->map_valid = 1;
+ subflow->map_seq = READ_ONCE(msk->ack_seq);
+ subflow->map_data_len = skb->len;
+ subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ return true;
}
bool mptcp_subflow_data_available(struct sock *sk)
@@ -1092,7 +1086,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
if (subflow->map_valid &&
mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
subflow->map_valid = 0;
- subflow->data_avail = 0;
+ WRITE_ONCE(subflow->data_avail, 0);
pr_debug("Done with mapping: seq=%u data_len=%u",
subflow->map_subflow_seq,
@@ -1120,41 +1114,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
*full_space = tcp_full_space(sk);
}
-static void subflow_data_ready(struct sock *sk)
-{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- u16 state = 1 << inet_sk_state_load(sk);
- struct sock *parent = subflow->conn;
- struct mptcp_sock *msk;
-
- msk = mptcp_sk(parent);
- if (state & TCPF_LISTEN) {
- /* MPJ subflow are removed from accept queue before reaching here,
- * avoid stray wakeups
- */
- if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
- return;
-
- set_bit(MPTCP_DATA_READY, &msk->flags);
- parent->sk_data_ready(parent);
- return;
- }
-
- WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
- !subflow->mp_join && !(state & TCPF_CLOSE));
-
- if (mptcp_subflow_data_available(sk))
- mptcp_data_ready(parent, sk);
-}
-
-static void subflow_write_space(struct sock *ssk)
-{
- struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
-
- mptcp_propagate_sndbuf(sk, ssk);
- mptcp_write_space(sk);
-}
-
void __mptcp_error_report(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -1195,6 +1154,43 @@ static void subflow_error_report(struct sock *ssk)
mptcp_data_unlock(sk);
}
+static void subflow_data_ready(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ u16 state = 1 << inet_sk_state_load(sk);
+ struct sock *parent = subflow->conn;
+ struct mptcp_sock *msk;
+
+ msk = mptcp_sk(parent);
+ if (state & TCPF_LISTEN) {
+ /* MPJ subflow are removed from accept queue before reaching here,
+ * avoid stray wakeups
+ */
+ if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+ return;
+
+ set_bit(MPTCP_DATA_READY, &msk->flags);
+ parent->sk_data_ready(parent);
+ return;
+ }
+
+ WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+ !subflow->mp_join && !(state & TCPF_CLOSE));
+
+ if (mptcp_subflow_data_available(sk))
+ mptcp_data_ready(parent, sk);
+ else if (unlikely(sk->sk_err))
+ subflow_error_report(sk);
+}
+
+static void subflow_write_space(struct sock *ssk)
+{
+ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+ mptcp_propagate_sndbuf(sk, ssk);
+ mptcp_write_space(sk);
+}
+
static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
@@ -1505,6 +1501,8 @@ static void subflow_state_change(struct sock *sk)
*/
if (mptcp_subflow_data_available(sk))
mptcp_data_ready(parent, sk);
+ else if (unlikely(sk->sk_err))
+ subflow_error_report(sk);
subflow_sched_work_if_closed(mptcp_sk(parent), sk);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index d45dbcba8b49..c25097092a06 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
svc->port = u->port;
svc->fwmark = u->fwmark;
- svc->flags = u->flags;
+ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
svc->ipvs = ipvs;
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 89e5bac384d7..dc9ca12b0489 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -664,7 +664,7 @@ int nf_conntrack_proto_init(void)
#if IS_ENABLED(CONFIG_IPV6)
cleanup_sockopt:
- nf_unregister_sockopt(&so_getorigdst6);
+ nf_unregister_sockopt(&so_getorigdst);
#endif
return ret;
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 39c02d1aeedf..1d02650dd715 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -306,8 +306,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
{
flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
- if (likely(!nf_flowtable_hw_offload(flow_table) ||
- !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
+ if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
nf_flow_offload_add(flow_table, flow);
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 2af7bdb38407..528b2f172684 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -902,10 +902,11 @@ static void flow_offload_work_add(struct flow_offload_work *offload)
err = flow_offload_rule_add(offload, flow_rule);
if (err < 0)
- set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
- else
- set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+ goto out;
+
+ set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+out:
nf_flow_offload_destroy(flow_rule);
}
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b100c04a0e43..3d6d49420db8 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
int length = (th->doff * 4) - sizeof(*th);
u8 buf[40], *ptr;
+ if (unlikely(length < 0))
+ return false;
+
ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
if (ptr == NULL)
return false;
@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
length--;
continue;
default:
+ if (length < 2)
+ return true;
opsize = *ptr++;
if (opsize < 2)
return true;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d63d2d8f769c..bf4d6ec9fc55 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -736,7 +736,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
goto nla_put_failure;
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
- nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+ nla_put_be32(skb, NFTA_TABLE_FLAGS,
+ htonl(table->flags & NFT_TABLE_F_MASK)) ||
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
NFTA_TABLE_PAD))
@@ -947,20 +948,22 @@ err_register_hooks:
static void nf_tables_table_disable(struct net *net, struct nft_table *table)
{
+ table->flags &= ~NFT_TABLE_F_DORMANT;
nft_table_disable(net, table, 0);
+ table->flags |= NFT_TABLE_F_DORMANT;
}
-enum {
- NFT_TABLE_STATE_UNCHANGED = 0,
- NFT_TABLE_STATE_DORMANT,
- NFT_TABLE_STATE_WAKEUP
-};
+#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
+#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
+#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
+ __NFT_TABLE_F_WAS_AWAKEN)
static int nf_tables_updtable(struct nft_ctx *ctx)
{
struct nft_trans *trans;
u32 flags;
- int ret = 0;
+ int ret;
if (!ctx->nla[NFTA_TABLE_FLAGS])
return 0;
@@ -985,21 +988,27 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if ((flags & NFT_TABLE_F_DORMANT) &&
!(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
- nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT;
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
+ if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
+ ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
- ret = nf_tables_table_enable(ctx->net, ctx->table);
- if (ret >= 0)
- nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP;
+ ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
+ ret = nf_tables_table_enable(ctx->net, ctx->table);
+ if (ret < 0)
+ goto err_register_hooks;
+
+ ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
+ }
}
- if (ret < 0)
- goto err;
- nft_trans_table_flags(trans) = flags;
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
+
return 0;
-err:
+
+err_register_hooks:
nft_trans_destroy(trans);
return ret;
}
@@ -1905,7 +1914,7 @@ static int nft_chain_parse_netdev(struct net *net,
static int nft_chain_parse_hook(struct net *net,
const struct nlattr * const nla[],
struct nft_chain_hook *hook, u8 family,
- bool autoload)
+ struct netlink_ext_ack *extack, bool autoload)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct nlattr *ha[NFTA_HOOK_MAX + 1];
@@ -1935,8 +1944,10 @@ static int nft_chain_parse_hook(struct net *net,
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
family, autoload);
- if (IS_ERR(type))
+ if (IS_ERR(type)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
return PTR_ERR(type);
+ }
}
if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
@@ -1945,8 +1956,11 @@ static int nft_chain_parse_hook(struct net *net,
hook->priority <= NF_IP_PRI_CONNTRACK)
return -EOPNOTSUPP;
- if (!try_module_get(type->owner))
+ if (!try_module_get(type->owner)) {
+ if (nla[NFTA_CHAIN_TYPE])
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
return -ENOENT;
+ }
hook->type = type;
@@ -2057,7 +2071,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
static u64 chain_id;
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
- u8 policy, u32 flags)
+ u8 policy, u32 flags,
+ struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
@@ -2079,7 +2094,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (flags & NFT_CHAIN_BINDING)
return -EOPNOTSUPP;
- err = nft_chain_parse_hook(net, nla, &hook, family, true);
+ err = nft_chain_parse_hook(net, nla, &hook, family, extack,
+ true);
if (err < 0)
return err;
@@ -2234,7 +2250,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return -EEXIST;
}
err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
- false);
+ extack, false);
if (err < 0)
return err;
@@ -2447,7 +2463,7 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
extack);
}
- return nf_tables_addchain(&ctx, family, genmask, policy, flags);
+ return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
}
static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
@@ -3328,8 +3344,10 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
if (n == NFT_RULE_MAXEXPRS)
goto err1;
err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
- if (err < 0)
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, tmp);
goto err1;
+ }
size += expr_info[n].ops->size;
n++;
}
@@ -4346,13 +4364,45 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_set_alloc_name(&ctx, set, name);
kfree(name);
if (err < 0)
- goto err_set_alloc_name;
+ goto err_set_name;
+
+ udata = NULL;
+ if (udlen) {
+ udata = set->data + size;
+ nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+ }
+
+ INIT_LIST_HEAD(&set->bindings);
+ INIT_LIST_HEAD(&set->catchall_list);
+ set->table = table;
+ write_pnet(&set->net, net);
+ set->ops = ops;
+ set->ktype = ktype;
+ set->klen = desc.klen;
+ set->dtype = dtype;
+ set->objtype = objtype;
+ set->dlen = desc.dlen;
+ set->flags = flags;
+ set->size = desc.size;
+ set->policy = policy;
+ set->udlen = udlen;
+ set->udata = udata;
+ set->timeout = timeout;
+ set->gc_int = gc_int;
+
+ set->field_count = desc.field_count;
+ for (i = 0; i < desc.field_count; i++)
+ set->field_len[i] = desc.field_len[i];
+
+ err = ops->init(set, &desc, nla);
+ if (err < 0)
+ goto err_set_init;
if (nla[NFTA_SET_EXPR]) {
expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
- goto err_set_alloc_name;
+ goto err_set_expr_alloc;
}
set->exprs[0] = expr;
set->num_exprs++;
@@ -4363,75 +4413,44 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(flags & NFT_SET_EXPR)) {
err = -EINVAL;
- goto err_set_alloc_name;
+ goto err_set_expr_alloc;
}
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
if (i == NFT_SET_EXPR_MAX) {
err = -E2BIG;
- goto err_set_init;
+ goto err_set_expr_alloc;
}
if (nla_type(tmp) != NFTA_LIST_ELEM) {
err = -EINVAL;
- goto err_set_init;
+ goto err_set_expr_alloc;
}
expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
if (IS_ERR(expr)) {
err = PTR_ERR(expr);
- goto err_set_init;
+ goto err_set_expr_alloc;
}
set->exprs[i++] = expr;
set->num_exprs++;
}
}
- udata = NULL;
- if (udlen) {
- udata = set->data + size;
- nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
- }
-
- INIT_LIST_HEAD(&set->bindings);
- INIT_LIST_HEAD(&set->catchall_list);
- set->table = table;
- write_pnet(&set->net, net);
- set->ops = ops;
- set->ktype = ktype;
- set->klen = desc.klen;
- set->dtype = dtype;
- set->objtype = objtype;
- set->dlen = desc.dlen;
- set->flags = flags;
- set->size = desc.size;
- set->policy = policy;
- set->udlen = udlen;
- set->udata = udata;
- set->timeout = timeout;
- set->gc_int = gc_int;
set->handle = nf_tables_alloc_handle(table);
- set->field_count = desc.field_count;
- for (i = 0; i < desc.field_count; i++)
- set->field_len[i] = desc.field_len[i];
-
- err = ops->init(set, &desc, nla);
- if (err < 0)
- goto err_set_init;
-
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
if (err < 0)
- goto err_set_trans;
+ goto err_set_expr_alloc;
list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
-err_set_trans:
- ops->destroy(set);
-err_set_init:
+err_set_expr_alloc:
for (i = 0; i < set->num_exprs; i++)
nft_expr_destroy(&ctx, set->exprs[i]);
-err_set_alloc_name:
+
+ ops->destroy(set);
+err_set_init:
kfree(set->name);
err_set_name:
kvfree(set);
@@ -8547,10 +8566,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT)
+ if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+ if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
nf_tables_table_disable(net, trans->ctx.table);
- trans->ctx.table->flags = nft_trans_table_flags(trans);
+ trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
} else {
nft_clear(net, trans->ctx.table);
}
@@ -8768,9 +8791,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
- if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP)
+ if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+ if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
nf_tables_table_disable(net, trans->ctx.table);
-
+ trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+ } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+ trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+ }
+ trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
list_del_rcu(&trans->ctx.table->list);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 322ac5dd5402..752b10cae524 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -380,10 +380,14 @@ static int
nfnl_cthelper_update(const struct nlattr * const tb[],
struct nf_conntrack_helper *helper)
{
+ u32 size;
int ret;
- if (tb[NFCTH_PRIV_DATA_LEN])
- return -EBUSY;
+ if (tb[NFCTH_PRIV_DATA_LEN]) {
+ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+ if (size != helper->data_len)
+ return -EBUSY;
+ }
if (tb[NFCTH_POLICY]) {
ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 0592a9456084..337e22d8b40b 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1217,7 +1217,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
struct nf_conn *ct;
ct = nf_ct_get(pkt->skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_UNTRACKED) {
+ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
regs->verdict.code = NFT_BREAK;
return;
}
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 528a2d7ca991..dce866d93fee 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -408,8 +408,8 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
*
* Return: true on match, false otherwise.
*/
-static bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
{
struct nft_pipapo *priv = nft_set_priv(set);
unsigned long *res_map, *fill_map;
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 25a75591583e..d84afb8fa79a 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -178,6 +178,8 @@ struct nft_pipapo_elem {
int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
union nft_pipapo_map_bucket *mt, bool match_only);
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
/**
* pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index d65ae0e23028..eabdb8d552ee 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -1131,6 +1131,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
bool map_index;
int i, ret = 0;
+ if (unlikely(!irq_fpu_usable()))
+ return nft_pipapo_lookup(net, set, key, ext);
+
m = rcu_dereference(priv->match);
/* This also protects access to all data related to scratch maps */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 3a62f97acf39..6133e412b948 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -461,11 +461,13 @@ void netlink_table_ungrab(void)
static inline void
netlink_lock_table(void)
{
+ unsigned long flags;
+
/* read_lock() synchronizes us to netlink_table_grab */
- read_lock(&nl_table_lock);
+ read_lock_irqsave(&nl_table_lock, flags);
atomic_inc(&nl_table_users);
- read_unlock(&nl_table_lock);
+ read_unlock_irqrestore(&nl_table_lock, flags);
}
static inline void
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 53dbe733f998..6cfd30fc0798 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (!llcp_sock->service_name) {
nfc_llcp_local_put(llcp_sock->local);
llcp_sock->local = NULL;
+ llcp_sock->dev = NULL;
ret = -ENOMEM;
goto put_dev;
}
@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
llcp_sock->local = NULL;
kfree(llcp_sock->service_name);
llcp_sock->service_name = NULL;
+ llcp_sock->dev = NULL;
ret = -EADDRINUSE;
goto put_dev;
}
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 9a585332ea84..da7fe9db1b00 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1191,6 +1191,7 @@ EXPORT_SYMBOL(nci_allocate_device);
void nci_free_device(struct nci_dev *ndev)
{
nfc_free_device(ndev->nfc_dev);
+ nci_hci_deallocate(ndev);
kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index 6b275a387a92..96865142104f 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -792,3 +792,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
return hdev;
}
+
+void nci_hci_deallocate(struct nci_dev *ndev)
+{
+ kfree(ndev->hci_dev);
+}
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 9c7eb8455ba8..5f1d438a0a23 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -329,7 +329,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
return -ESOCKTNOSUPPORT;
if (sock->type == SOCK_RAW) {
- if (!capable(CAP_NET_RAW))
+ if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
sock->ops = &rawsock_raw_ops;
} else {
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 96b524ceabca..896b8f5bc885 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -611,6 +611,14 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
spin_lock(&meter->lock);
long_delta_ms = (now_ms - meter->used); /* ms */
+ if (long_delta_ms < 0) {
+ /* This condition means that we have several threads fighting
+ * for a meter lock, and the one who received the packets a
+ * bit later wins. Assuming that all racing threads received
+ * packets at the same time to avoid overflow.
+ */
+ long_delta_ms = 0;
+ }
/* Make sure delta_ms will not be too large, so that bucket will not
* wrap around below.
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ba96db1880ea..330ba68828e7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -422,7 +422,8 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
return TP_STATUS_TS_RAW_HARDWARE;
- if (ktime_to_timespec64_cond(skb->tstamp, ts))
+ if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
+ ktime_to_timespec64_cond(skb->tstamp, ts))
return TP_STATUS_TS_SOFTWARE;
return 0;
@@ -2340,7 +2341,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
- if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
+ /* Always timestamp; prefer an existing software timestamp taken
+ * closer to the time of capture.
+ */
+ ts_status = tpacket_get_timestamp(skb, &ts,
+ po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
+ if (!ts_status)
ktime_get_real_ts64(&ts);
status |= ts_status;
@@ -2677,7 +2683,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
- proto = po->num;
+ proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2890,7 +2896,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
- proto = po->num;
+ proto = READ_ONCE(po->num);
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -3028,10 +3034,13 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- if (po->tx_ring.pg_vec)
+ /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
+ * tpacket_snd() will redo the check safely.
+ */
+ if (data_race(po->tx_ring.pg_vec))
return tpacket_snd(po, msg);
- else
- return packet_snd(sock, msg, len);
+
+ return packet_snd(sock, msg, len);
}
/*
@@ -3162,7 +3171,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
/* prevents packet_notifier() from calling
* register_prot_hook()
*/
- po->num = 0;
+ WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
@@ -3172,17 +3181,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
}
BUG_ON(po->running);
- po->num = proto;
+ WRITE_ONCE(po->num, proto);
po->prot_hook.type = proto;
if (unlikely(unlisted)) {
dev_put(dev);
po->prot_hook.dev = NULL;
- po->ifindex = -1;
+ WRITE_ONCE(po->ifindex, -1);
packet_cached_dev_reset(po);
} else {
po->prot_hook.dev = dev;
- po->ifindex = dev ? dev->ifindex : 0;
+ WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
packet_cached_dev_assign(po, dev);
}
}
@@ -3496,7 +3505,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
uaddr->sa_family = AF_PACKET;
memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
rcu_read_unlock();
@@ -3511,16 +3520,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
+ int ifindex;
if (peer)
return -EOPNOTSUPP;
+ ifindex = READ_ONCE(po->ifindex);
sll->sll_family = AF_PACKET;
- sll->sll_ifindex = po->ifindex;
- sll->sll_protocol = po->num;
+ sll->sll_ifindex = ifindex;
+ sll->sll_protocol = READ_ONCE(po->num);
sll->sll_pkttype = 0;
rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
@@ -4099,7 +4110,7 @@ static int packet_notifier(struct notifier_block *this,
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
- po->ifindex = -1;
+ WRITE_ONCE(po->ifindex, -1);
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
@@ -4405,7 +4416,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
was_running = po->running;
num = po->num;
if (was_running) {
- po->num = 0;
+ WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
@@ -4440,7 +4451,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
spin_lock(&po->bind_lock);
if (was_running) {
- po->num = num;
+ WRITE_ONCE(po->num, num);
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
@@ -4610,8 +4621,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
s,
refcount_read(&s->sk_refcnt),
s->sk_type,
- ntohs(po->num),
- po->ifindex,
+ ntohs(READ_ONCE(po->num)),
+ READ_ONCE(po->ifindex),
po->running,
atomic_read(&s->sk_rmem_alloc),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c0477bec09bd..f2efaa4225f9 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -436,7 +436,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
struct qrtr_sock *ipc;
struct sk_buff *skb;
struct qrtr_cb *cb;
- unsigned int size;
+ size_t size;
unsigned int ver;
size_t hdrlen;
diff --git a/net/rds/connection.c b/net/rds/connection.c
index f2fcab182095..a3bc4b54d491 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -240,12 +240,23 @@ static struct rds_connection *__rds_conn_create(struct net *net,
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
- if (is_outgoing && trans->t_prefer_loopback) {
- /* "outgoing" connection - and the transport
- * says it wants the connection handled by the
- * loopback transport. This is what TCP does.
- */
- trans = &rds_loop_transport;
+ if (trans->t_prefer_loopback) {
+ if (likely(is_outgoing)) {
+ /* "outgoing" connection to local address.
+ * Protocol says it wants the connection
+ * handled by the loopback transport.
+ * This is what TCP does.
+ */
+ trans = &rds_loop_transport;
+ } else {
+ /* No transport currently in use
+ * should end up here, but if it
+ * does, reset/destroy the connection.
+ */
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
}
}
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 4db109fb6ec2..5b426dc3634d 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (rds_cmsg_recv(inc, msg, rs)) {
ret = -EFAULT;
- goto out;
+ break;
}
rds_recvmsg_zcookie(rs, msg);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 43db0eca911f..abf19c0e3ba0 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -313,8 +313,8 @@ out:
}
#endif
-static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
- __u32 scope_id)
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id)
{
struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index bad9cf49d565..dc8d745d6857 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -59,7 +59,8 @@ u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
void rds_tcp_accept_work(struct sock *sk);
-
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+ __u32 scope_id);
/* tcp_connect.c */
int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 101cf14215a0..09cadd556d1e 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -167,6 +167,12 @@ int rds_tcp_accept_one(struct socket *sock)
}
#endif
+ if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
+ /* local address connection is only allowed via loopback */
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
conn = rds_conn_create(sock_net(sock->sk),
my_addr, peer_addr,
&rds_tcp_transport, 0, GFP_KERNEL, dev_if);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index ec7a1c438df9..a656baa321fe 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
}
err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
- if (err == NF_ACCEPT &&
- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
- if (maniptype == NF_NAT_MANIP_SRC)
- maniptype = NF_NAT_MANIP_DST;
- else
- maniptype = NF_NAT_MANIP_SRC;
-
- err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+ if (ct->status & IPS_SRC_NAT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ maniptype = NF_NAT_MANIP_DST;
+ else
+ maniptype = NF_NAT_MANIP_SRC;
+
+ err = ct_nat_execute(skb, ct, ctinfo, range,
+ maniptype);
+ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ err = ct_nat_execute(skb, ct, ctinfo, NULL,
+ NF_NAT_MANIP_SRC);
+ }
}
return err;
#else
@@ -984,7 +989,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
*/
cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
if (!cached) {
- if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+ if (tcf_ct_flow_table_lookup(p, skb, family)) {
skip_add = true;
goto do_nat;
}
@@ -1022,10 +1027,11 @@ do_nat:
* even if the connection is already confirmed.
*/
nf_conntrack_confirm(skb);
- } else if (!skip_add) {
- tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
}
+ if (!skip_add)
+ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
out_push:
skb_push_rcsum(skb, nh_ofs);
@@ -1202,9 +1208,6 @@ static int tcf_ct_fill_params(struct net *net,
sizeof(p->zone));
}
- if (p->zone == NF_CT_DEFAULT_ZONE_ID)
- return 0;
-
nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
if (!tmpl) {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 40fbea626dfd..279f9e2a2319 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1624,7 +1624,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
/* If we missed on some chain */
if (ret == TC_ACT_UNSPEC && last_executed_chain) {
- ext = skb_ext_add(skb, TC_SKB_EXT);
+ ext = tc_skb_ext_alloc(skb);
if (WARN_ON_ONCE(!ext))
return TC_ACT_SHOT;
ext->chain = last_executed_chain;
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7d37638ee1c7..951542843cab 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
}
tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
- if (!tcph)
+ if (!tcph || tcph->doff < 5)
return NULL;
return skb_header_pointer(skb, offset,
@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
length--;
continue;
}
+ if (length < 2)
+ break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
length--;
continue;
}
+ if (length < 2)
+ break;
opsize = *ptr++;
if (opsize < 2 || opsize > length)
break;
@@ -2338,7 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
/* List of known Diffserv codepoints:
*
- * Least Effort (CS1)
+ * Least Effort (CS1, LE)
* Best Effort (CS0)
* Max Reliability & LLT "Lo" (TOS1)
* Max Throughput (TOS2)
@@ -2360,7 +2364,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Total 25 codepoints.
*/
-/* List of traffic classes in RFC 4594:
+/* List of traffic classes in RFC 4594, updated by RFC 8622:
* (roughly descending order of contended priority)
* (roughly ascending order of uncontended throughput)
*
@@ -2375,7 +2379,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Ops, Admin, Management (CS2,TOS1) - eg. ssh
* Standard Service (CS0 & unrecognised codepoints)
* High Throughput Data (AF1x,TOS2) - eg. web traffic
- * Low Priority Data (CS1) - eg. BitTorrent
+ * Low Priority Data (CS1,LE) - eg. BitTorrent
* Total 12 traffic classes.
*/
@@ -2391,7 +2395,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
* Video Streaming (AF4x, AF3x, CS3)
* Bog Standard (CS0 etc.)
* High Throughput (AF1x, TOS2)
- * Background Traffic (CS1)
+ * Background Traffic (CS1, LE)
*
* Total 8 traffic classes.
*/
@@ -2435,7 +2439,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
* Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
* Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
* Best Effort (CS0, AF1x, TOS2, and those not specified)
- * Background Traffic (CS1)
+ * Background Traffic (CS1, LE)
*
* Total 4 traffic classes.
*/
@@ -2473,7 +2477,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
static int cake_config_diffserv3(struct Qdisc *sch)
{
/* Simplified Diffserv structure with 3 tins.
- * Low Priority (CS1)
+ * Low Priority (CS1, LE)
* Best Effort
* Latency Sensitive (TOS4, VA, EF, CS6, CS7)
*/
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index cd2748e2d4a2..d320bcfb2da2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -407,7 +407,8 @@ static void dsmark_reset(struct Qdisc *sch)
struct dsmark_qdisc_data *p = qdisc_priv(sch);
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- qdisc_reset(p->q);
+ if (p->q)
+ qdisc_reset(p->q);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 949163fe68af..cac684952edc 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -138,8 +138,15 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Classifies packet into corresponding flow */
idx = fq_pie_classify(skb, sch, &ret);
- sel_flow = &q->flows[idx];
+ if (idx == 0) {
+ if (ret & __NET_XMIT_BYPASS)
+ qdisc_qstats_drop(sch);
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
+ idx--;
+ sel_flow = &q->flows[idx];
/* Checks whether adding a new packet would exceed memory limit */
get_pie_cb(skb)->mem_usage = skb->truesize;
memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
@@ -297,9 +304,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
goto flow_error;
}
q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
- if (!q->flows_cnt || q->flows_cnt >= 65536) {
+ if (!q->flows_cnt || q->flows_cnt > 65536) {
NL_SET_ERR_MSG_MOD(extack,
- "Number of flows must range in [1..65535]");
+ "Number of flows must range in [1..65536]");
goto flow_error;
}
}
@@ -367,7 +374,7 @@ static void fq_pie_timer(struct timer_list *t)
struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
struct Qdisc *sch = q->sch;
spinlock_t *root_lock; /* to lock qdisc for probability calculations */
- u16 idx;
+ u32 idx;
root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
@@ -388,7 +395,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
{
struct fq_pie_sched_data *q = qdisc_priv(sch);
int err;
- u16 idx;
+ u32 idx;
pie_params_init(&q->p_params);
sch->limit = 10 * 1024;
@@ -500,7 +507,7 @@ static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
static void fq_pie_reset(struct Qdisc *sch)
{
struct fq_pie_sched_data *q = qdisc_priv(sch);
- u16 idx;
+ u32 idx;
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 44991ea726fc..fc8b56bcabf3 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -35,6 +35,25 @@
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+ const struct netdev_queue *txq)
+{
+ clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+ /* Make sure the below netif_xmit_frozen_or_stopped()
+ * checking happens after clearing STATE_MISSED.
+ */
+ smp_mb__after_atomic();
+
+ /* Checking netif_xmit_frozen_or_stopped() again to
+ * make sure STATE_MISSED is set if the STATE_MISSED
+ * set by netif_tx_wake_queue()'s rescheduling of
+ * net_tx_action() is cleared by the above clear_bit().
+ */
+ if (!netif_xmit_frozen_or_stopped(txq))
+ set_bit(__QDISC_STATE_MISSED, &q->state);
+}
+
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
}
} else {
skb = SKB_XOFF_MAGIC;
+ qdisc_maybe_clear_missed(q, txq);
}
}
@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
}
} else {
skb = NULL;
+ qdisc_maybe_clear_missed(q, txq);
}
if (lock)
spin_unlock(lock);
@@ -251,8 +272,10 @@ validate:
*validate = true;
if ((q->flags & TCQ_F_ONETXQUEUE) &&
- netif_xmit_frozen_or_stopped(txq))
+ netif_xmit_frozen_or_stopped(txq)) {
+ qdisc_maybe_clear_missed(q, txq);
return skb;
+ }
skb = qdisc_dequeue_skb_bad_txq(q);
if (unlikely(skb)) {
@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ else
+ qdisc_maybe_clear_missed(q, txq);
HARD_TX_UNLOCK(dev, txq);
} else {
@@ -640,8 +665,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct sk_buff *skb = NULL;
+ bool need_retry = true;
int band;
+retry:
for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
struct skb_array *q = band2list(priv, band);
@@ -652,6 +679,23 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
}
if (likely(skb)) {
qdisc_update_stats_at_dequeue(qdisc, skb);
+ } else if (need_retry &&
+ test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+ /* Delay clearing the STATE_MISSED here to reduce
+ * the overhead of the second spin_trylock() in
+ * qdisc_run_begin() and __netif_schedule() calling
+ * in qdisc_run_end().
+ */
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+ /* Make sure dequeuing happens after clearing
+ * STATE_MISSED.
+ */
+ smp_mb__after_atomic();
+
+ need_retry = false;
+
+ goto retry;
} else {
WRITE_ONCE(qdisc->empty, true);
}
@@ -1158,8 +1202,10 @@ static void dev_reset_queue(struct net_device *dev,
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
- if (nolock)
+ if (nolock) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
spin_unlock_bh(&qdisc->seqlock);
+ }
}
static bool some_qdisc_is_busy(struct net_device *dev)
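The sch_generic.c changes above all follow one pattern: before giving up, clear __QDISC_STATE_MISSED, issue a barrier, then look at the queue state again so a wakeup that raced with the first check is not lost. A minimal user-space sketch of that clear-then-recheck idea using C11 atomics (names and structure are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool missed;
static atomic_int queue_len;

static int dequeue(void)
{
	bool need_retry = true;
	int len;

retry:
	len = atomic_load(&queue_len);
	if (len > 0) {
		atomic_fetch_sub(&queue_len, 1);
		return 1;
	}
	if (need_retry && atomic_load(&missed)) {
		/* consume the missed flag, then look at the queue one more time */
		atomic_store(&missed, false);
		atomic_thread_fence(memory_order_seq_cst);
		need_retry = false;
		goto retry;
	}
	return 0;
}

int main(void)
{
	int got;

	atomic_store(&missed, true);	/* a producer signalled while we were busy */
	got = dequeue();
	printf("dequeued=%d missed_after=%d\n", got, (int)atomic_load(&missed));
	return 0;
}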
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 081c11d5717c..8827987ba903 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
struct Qdisc *old_q;
/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
- qdisc_refcount_inc(new_q);
+ if (new_q)
+ qdisc_refcount_inc(new_q);
old_q = htb_graft_helper(dev_queue, new_q);
WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
}
@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
cl->parent->common.classid,
NULL);
if (q->offload) {
- if (new_q) {
+ if (new_q)
htb_set_lockdep_class_child(new_q);
- htb_parent_to_leaf_offload(sch, dev_queue, new_q);
- }
+ htb_parent_to_leaf_offload(sch, dev_queue, new_q);
}
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 40f9f6c4a0a1..a79d193ff872 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4473,6 +4473,7 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
transports)
t->encap_port = encap_port;
+ asoc->encap_port = encap_port;
return 0;
}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e92df779af73..55871b277f47 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -307,7 +307,7 @@ static struct ctl_table sctp_net_table[] = {
.data = &init_net.sctp.encap_port,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &udp_port_max,
},
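Switching the encap_port entry to proc_dointvec_minmax makes the extra1/extra2 bounds (SYSCTL_ZERO and udp_port_max) actually enforced on write. A tiny stand-alone model of the difference (illustrative only, not the procfs implementation):

#include <stdio.h>

static int set_bounded(int *target, int val, int lo, int hi)
{
	if (val < lo || val > hi)
		return -1;	/* stands in for -EINVAL from the sysctl write */
	*target = val;
	return 0;
}

int main(void)
{
	int encap_port = 0;
	int rc;

	rc = set_bounded(&encap_port, 9899, 0, 65535);
	printf("write 9899:  rc=%d port=%d\n", rc, encap_port);
	rc = set_bounded(&encap_port, 70000, 0, 65535);
	printf("write 70000: rc=%d port=%d\n", rc, encap_port);
	return 0;
}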
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 9c6e95882553..967712ba52a0 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -402,6 +402,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
return NULL;
}
+ smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
+ WQ_MEM_RECLAIM, name);
+ if (!smcd->event_wq) {
+ kfree(smcd->conn);
+ kfree(smcd);
+ return NULL;
+ }
+
smcd->dev.parent = parent;
smcd->dev.release = smcd_release;
device_initialize(&smcd->dev);
@@ -415,19 +423,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
INIT_LIST_HEAD(&smcd->vlan);
INIT_LIST_HEAD(&smcd->lgr_list);
init_waitqueue_head(&smcd->lgrs_deleted);
- smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
- WQ_MEM_RECLAIM, name);
- if (!smcd->event_wq) {
- kfree(smcd->conn);
- kfree(smcd);
- return NULL;
- }
return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);
int smcd_register_dev(struct smcd_dev *smcd)
{
+ int rc;
+
mutex_lock(&smcd_dev_list.mutex);
if (list_empty(&smcd_dev_list.list)) {
u8 *system_eid = NULL;
@@ -447,7 +450,14 @@ int smcd_register_dev(struct smcd_dev *smcd)
dev_name(&smcd->dev), smcd->pnetid,
smcd->pnetid_by_user ? " (user defined)" : "");
- return device_add(&smcd->dev);
+ rc = device_add(&smcd->dev);
+ if (rc) {
+ mutex_lock(&smcd_dev_list.mutex);
+ list_del(&smcd->list);
+ mutex_unlock(&smcd_dev_list.mutex);
+ }
+
+ return rc;
}
EXPORT_SYMBOL_GPL(smcd_register_dev);
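The smcd_register_dev() change above adds rollback: if the final device_add() fails, the entry that was already put on the global device list is removed again, so no half-registered device is left behind. A toy model of that register-then-undo pattern (stand-alone C, names are made up):

#include <stdio.h>

struct dev {
	int on_list;
	int added;
};

static int device_add_stub(struct dev *d, int fail)
{
	if (fail)
		return -1;		/* stands in for a device_add() error */
	d->added = 1;
	return 0;
}

static int register_dev(struct dev *d, int fail)
{
	int rc;

	d->on_list = 1;			/* list_add() under the mutex */
	rc = device_add_stub(d, fail);
	if (rc)
		d->on_list = 0;		/* roll the list insertion back on failure */
	return rc;
}

int main(void)
{
	struct dev d = { 0, 0 };
	int rc;

	rc = register_dev(&d, 1);
	printf("failed add: rc=%d on_list=%d\n", rc, d.on_list);
	rc = register_dev(&d, 0);
	printf("good add:   rc=%d on_list=%d\n", rc, d.on_list);
	return 0;
}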
diff --git a/net/socket.c b/net/socket.c
index 27e3e7d53f8e..4f2c6d2795d0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1072,19 +1072,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
* what to do with it - that's up to the protocol still.
*/
-/**
- * get_net_ns - increment the refcount of the network namespace
- * @ns: common namespace (net)
- *
- * Returns the net's common namespace.
- */
-
-struct ns_common *get_net_ns(struct ns_common *ns)
-{
- return &get_net(container_of(ns, struct net, ns))->ns;
-}
-EXPORT_SYMBOL_GPL(get_net_ns);
-
static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f555d335e910..42623d6b8f0e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task)
return;
}
- /*
- * Even though there was an error, we may have acquired
- * a request slot somehow. Make sure not to leak it.
- */
- if (task->tk_rqstp)
- xprt_release(task);
-
switch (status) {
case -ENOMEM:
rpc_delay(task, HZ >> 2);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e5b5a960a69b..3509a7f139b9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -70,6 +70,7 @@
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
static void xprt_destroy(struct rpc_xprt *xprt);
+static void xprt_request_init(struct rpc_task *task);
static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
@@ -1606,17 +1607,40 @@ xprt_transmit(struct rpc_task *task)
spin_unlock(&xprt->queue_lock);
}
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+ if (task->tk_rqstp)
+ xprt_request_init(task);
+}
+
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
set_bit(XPRT_CONGESTED, &xprt->state);
- rpc_sleep_on(&xprt->backlog, task, NULL);
+ rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
+}
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
+
+static bool __xprt_set_rq(struct rpc_task *task, void *data)
+{
+ struct rpc_rqst *req = data;
+
+ if (task->tk_rqstp == NULL) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ task->tk_rqstp = req;
+ return true;
+ }
+ return false;
}
-static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (rpc_wake_up_next(&xprt->backlog) == NULL)
+ if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
clear_bit(XPRT_CONGESTED, &xprt->state);
+ return false;
+ }
+ return true;
}
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
@@ -1626,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
goto out;
spin_lock(&xprt->reserve_lock);
if (test_bit(XPRT_CONGESTED, &xprt->state)) {
- rpc_sleep_on(&xprt->backlog, task, NULL);
+ xprt_add_backlog(xprt, task);
ret = true;
}
spin_unlock(&xprt->reserve_lock);
@@ -1703,11 +1727,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot);
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
spin_lock(&xprt->reserve_lock);
- if (!xprt_dynamic_free_slot(xprt, req)) {
+ if (!xprt_wake_up_backlog(xprt, req) &&
+ !xprt_dynamic_free_slot(xprt, req)) {
memset(req, 0, sizeof(*req)); /* mark unused */
list_add(&req->rq_list, &xprt->free);
}
- xprt_wake_up_backlog(xprt);
spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);
@@ -1894,10 +1918,10 @@ void xprt_release(struct rpc_task *task)
xdr_free_bvec(&req->rq_snd_buf);
if (req->rq_cred != NULL)
put_rpccred(req->rq_cred);
- task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
+ task->tk_rqstp = NULL;
if (likely(!bc_prealloc(req)))
xprt->ops->free_slot(xprt, req);
else
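The xprt.c changes above hand a freed request slot directly to the first backlogged task instead of returning it to the free list and waking the waiter separately, closing the window in which the slot could be lost or grabbed by another task. A toy model of that hand-off (all names are invented, not the SUNRPC API):

#include <stdbool.h>
#include <stdio.h>

struct waiter {
	bool wants_slot;
	int slot;
};

static bool wake_up_backlog(struct waiter *w, int slot)
{
	if (w && w->wants_slot) {
		w->slot = slot;		/* hand the slot straight to the waiter */
		w->wants_slot = false;
		return true;
	}
	return false;
}

static void free_slot(struct waiter *w, int slot, int *free_pool, int *nfree)
{
	if (!wake_up_backlog(w, slot))
		free_pool[(*nfree)++] = slot;	/* nobody waiting: recycle it */
}

int main(void)
{
	struct waiter w = { true, -1 };
	int pool[4], nfree = 0;

	free_slot(&w, 7, pool, &nfree);
	printf("waiter got slot %d, free pool holds %d\n", w.slot, nfree);
	free_slot(&w, 8, pool, &nfree);
	printf("second slot recycled, free pool holds %d\n", nfree);
	return 0;
}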
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 78c075a68c04..1b4073131c6f 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -7,13 +7,13 @@
* Trond Myklebust <trond.myklebust@primarydata.com>
*
*/
+#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
-#include <asm/cmpxchg.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/addr.h>
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 649f7d8b9733..c335c1361564 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -628,8 +628,9 @@ out_mapping_err:
return false;
}
-/* The tail iovec might not reside in the same page as the
- * head iovec.
+/* The tail iovec may include an XDR pad for the page list,
+ * as well as additional content, and may not reside in the
+ * same page as the head iovec.
*/
static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
struct xdr_buf *xdr,
@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req,
struct xdr_buf *xdr)
{
- struct kvec *tail = &xdr->tail[0];
-
if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
return false;
- /* If there is a Read chunk, the page list is handled
+ /* If there is a Read chunk, the page list is being handled
* via explicit RDMA, and thus is skipped here.
*/
- if (tail->iov_len) {
- if (!rpcrdma_prepare_tail_iov(req, xdr,
- offset_in_page(tail->iov_base),
- tail->iov_len))
+ /* Do not include the tail if it is only an XDR pad */
+ if (xdr->tail[0].iov_len > 3) {
+ unsigned int page_base, len;
+
+ /* If the content in the page list is an odd length,
+ * xdr_write_pages() adds a pad at the beginning of
+ * the tail iovec. Force the tail's non-pad content to
+ * land at the next XDR position in the Send message.
+ */
+ page_base = offset_in_page(xdr->tail[0].iov_base);
+ len = xdr->tail[0].iov_len;
+ page_base += len & 3;
+ len -= len & 3;
+ if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
return false;
kref_get(&req->rl_kref);
}
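The rpc_rdma.c hunk above skips the XDR pad that xdr_write_pages() may place at the start of the tail iovec, so the Send payload begins on the next 4-byte XDR boundary. A minimal sketch of the same arithmetic, assuming (as the kernel code does) that the non-pad content is itself 4-byte aligned:

#include <stdio.h>

static void trim_xdr_pad(unsigned int *page_base, unsigned int *len)
{
	unsigned int pad = *len & 3;	/* pad size when the remaining content is 4-byte aligned */

	*page_base += pad;		/* step over the pad at the start of the tail */
	*len -= pad;			/* send only the aligned content */
}

int main(void)
{
	unsigned int base = 100, len = 11;	/* tail carrying 3 pad bytes plus 8 bytes of content */

	trim_xdr_pad(&base, &len);
	printf("page_base=%u len=%u\n", base, len);	/* prints 103 and 8 */
	return 0;
}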
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 09953597d055..19a49d26b1e4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
return;
out_sleep:
- set_bit(XPRT_CONGESTED, &xprt->state);
- rpc_sleep_on(&xprt->backlog, task, NULL);
task->tk_status = -EAGAIN;
+ xprt_add_backlog(xprt, task);
}
/**
@@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
struct rpcrdma_xprt *r_xprt =
container_of(xprt, struct rpcrdma_xprt, rx_xprt);
- memset(rqst, 0, sizeof(*rqst));
- rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
- if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
- clear_bit(XPRT_CONGESTED, &xprt->state);
+ rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+ if (!xprt_wake_up_backlog(xprt, rqst)) {
+ memset(rqst, 0, sizeof(*rqst));
+ rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+ }
}
static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 1e965a380896..649c23518ec0 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1201,6 +1201,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
}
/**
+ * rpcrdma_reply_put - Put reply buffers back into pool
+ * @buffers: buffer pool
+ * @req: object to return
+ *
+ */
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+ if (req->rl_reply) {
+ rpcrdma_rep_put(buffers, req->rl_reply);
+ req->rl_reply = NULL;
+ }
+}
+
+/**
* rpcrdma_buffer_get - Get a request buffer
* @buffers: Buffer pool from which to obtain a buffer
*
@@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
*/
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
- if (req->rl_reply)
- rpcrdma_rep_put(buffers, req->rl_reply);
- req->rl_reply = NULL;
+ rpcrdma_reply_put(buffers, req);
spin_lock(&buffers->rb_lock);
list_add(&req->rl_list, &buffers->rb_send_bufs);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 436ad7312614..5d231d94e944 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
struct rpcrdma_req *req);
void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
gfp_t flags);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 47aa47a2b07c..316d04945587 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1010,6 +1010,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
kernel_sock_shutdown(transport->sock, SHUT_RDWR);
return -ENOTCONN;
}
+ if (!transport->inet)
+ return -ENOTCONN;
xs_pktdump("packet data:",
req->rq_svec->iov_base,
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 5cc1f0307215..3f4542e0f065 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -60,7 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
tn->trial_addr = 0;
tn->addr_trial_end = 0;
tn->capabilities = TIPC_NODE_CAPABILITIES;
- INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
+ INIT_WORK(&tn->work, tipc_net_finalize_work);
memset(tn->node_id, 0, sizeof(tn->node_id));
memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
@@ -110,7 +110,7 @@ static void __net_exit tipc_exit_net(struct net *net)
tipc_detach_loopback(net);
/* Make sure the tipc_net_finalize_work() finished */
- cancel_work_sync(&tn->final_work.work);
+ cancel_work_sync(&tn->work);
tipc_net_stop(net);
tipc_bcast_stop(net);
@@ -119,6 +119,8 @@ static void __net_exit tipc_exit_net(struct net *net)
#ifdef CONFIG_TIPC_CRYPTO
tipc_crypto_stop(&tipc_net(net)->crypto_tx);
#endif
+ while (atomic_read(&tn->wq_count))
+ cond_resched();
}
static void __net_exit tipc_pernet_pre_exit(struct net *net)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 03de7b213f55..0a3f7a70a50a 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -91,12 +91,6 @@ extern unsigned int tipc_net_id __read_mostly;
extern int sysctl_tipc_rmem[3] __read_mostly;
extern int sysctl_tipc_named_timeout __read_mostly;
-struct tipc_net_work {
- struct work_struct work;
- struct net *net;
- u32 addr;
-};
-
struct tipc_net {
u8 node_id[NODE_ID_LEN];
u32 node_addr;
@@ -148,7 +142,9 @@ struct tipc_net {
struct tipc_crypto *crypto_tx;
#endif
/* Work item for net finalize */
- struct tipc_net_work final_work;
+ struct work_struct work;
+ /* Number of work items currently scheduled */
+ atomic_t wq_count;
};
static inline struct tipc_net *tipc_net(struct net *net)
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 5380f605b851..da69e1abf68f 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -168,7 +168,7 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
/* Apply trial address if we just left trial period */
if (!trial && !self) {
- tipc_sched_net_finalize(net, tn->trial_addr);
+ schedule_work(&tn->work);
msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
}
@@ -308,7 +308,7 @@ static void tipc_disc_timeout(struct timer_list *t)
if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
spin_unlock_bh(&d->lock);
- tipc_sched_net_finalize(net, tn->trial_addr);
+ schedule_work(&tn->work);
return;
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 115109259430..c44b4bfaaee6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -372,6 +372,11 @@ char tipc_link_plane(struct tipc_link *l)
return l->net_plane;
}
+struct net *tipc_link_net(struct tipc_link *l)
+{
+ return l->net;
+}
+
void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
l->peer_caps = capabilities;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index fc07232c9a12..a16f401fdabd 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -156,4 +156,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
bool tipc_link_too_silent(struct tipc_link *l);
+struct net *tipc_link_net(struct tipc_link *l);
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 3f0a25345a7c..ce6ab54822d8 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -149,18 +149,13 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (unlikely(head))
goto err;
*buf = NULL;
+ if (skb_has_frag_list(frag) && __skb_linearize(frag))
+ goto err;
frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
head = *headbuf = frag;
TIPC_SKB_CB(head)->tail = NULL;
- if (skb_is_nonlinear(head)) {
- skb_walk_frags(head, tail) {
- TIPC_SKB_CB(head)->tail = tail;
- }
- } else {
- skb_frag_list_init(head);
- }
return 0;
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a130195af188..0e95572e56b4 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -41,6 +41,7 @@
#include "socket.h"
#include "node.h"
#include "bcast.h"
+#include "link.h"
#include "netlink.h"
#include "monitor.h"
@@ -142,19 +143,9 @@ static void tipc_net_finalize(struct net *net, u32 addr)
void tipc_net_finalize_work(struct work_struct *work)
{
- struct tipc_net_work *fwork;
+ struct tipc_net *tn = container_of(work, struct tipc_net, work);
- fwork = container_of(work, struct tipc_net_work, work);
- tipc_net_finalize(fwork->net, fwork->addr);
-}
-
-void tipc_sched_net_finalize(struct net *net, u32 addr)
-{
- struct tipc_net *tn = tipc_net(net);
-
- tn->final_work.net = net;
- tn->final_work.addr = addr;
- schedule_work(&tn->final_work.work);
+ tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
}
void tipc_net_stop(struct net *net)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 8217905348f4..81af92954c6c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -423,18 +423,18 @@ static void tipc_node_write_unlock(struct tipc_node *n)
write_unlock_bh(&n->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, n->addr, n->capabilities);
+ tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, n->addr, n->capabilities);
+ tipc_named_node_up(net, sk.node, n->capabilities);
if (flags & TIPC_NOTIFY_LINK_UP) {
- tipc_mon_peer_up(net, n->addr, bearer_id);
- tipc_nametbl_publish(net, &ua, &sk, n->link_id);
+ tipc_mon_peer_up(net, sk.node, bearer_id);
+ tipc_nametbl_publish(net, &ua, &sk, sk.ref);
}
if (flags & TIPC_NOTIFY_LINK_DOWN) {
- tipc_mon_peer_down(net, n->addr, bearer_id);
- tipc_nametbl_withdraw(net, &ua, &sk, n->link_id);
+ tipc_mon_peer_down(net, sk.node, bearer_id);
+ tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
}
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 58935cd0d068..53af72824c9c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1262,7 +1262,10 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
- __skb_dequeue(arrvq);
+ /* Drop the skb reference that was taken in
+ * tipc_skb_peek()
+ */
+ kfree_skb(__skb_dequeue(arrvq));
}
spin_unlock_bh(&inputq->lock);
__skb_queue_purge(&tmpq);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index e556d2cdc064..c2bb818704c8 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -814,6 +814,7 @@ static void cleanup_bearer(struct work_struct *work)
kfree_rcu(rcast, rcu);
}
+ atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
dst_cache_destroy(&ub->rcast.dst_cache);
udp_tunnel_sock_release(ub->ubsock);
synchronize_net();
@@ -834,6 +835,7 @@ static void tipc_udp_disable(struct tipc_bearer *b)
RCU_INIT_POINTER(ub->bearer, NULL);
/* sock_release need to be done outside of rtnl lock */
+ atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
INIT_WORK(&ub->work, cleanup_bearer);
schedule_work(&ub->work);
}
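The udp_media.c and core.c hunks above pair an atomic_inc() when a bearer cleanup work item is scheduled with an atomic_dec() when it completes, and tipc_exit_net() spins (with cond_resched()) until the counter drains so the namespace cannot be torn down underneath deferred work. A simplified single-threaded model of that counting scheme (illustrative only):

#include <stdio.h>

static int wq_count;

static void schedule_cleanup(void)
{
	wq_count++;			/* tipc_udp_disable(): atomic_inc(&tn->wq_count) */
}

static void cleanup_bearer_stub(void)
{
	wq_count--;			/* cleanup_bearer(): atomic_dec(&tn->wq_count) */
}

int main(void)
{
	schedule_cleanup();
	schedule_cleanup();

	/* tipc_exit_net() waits: while (atomic_read(&tn->wq_count)) cond_resched(); */
	while (wq_count) {
		cleanup_bearer_stub();	/* pretend one deferred work item completed */
		printf("pending cleanups: %d\n", wq_count);
	}
	return 0;
}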
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 76a6f8c2eec4..bd9f1567aa39 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
struct net_device *netdev;
- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
- return;
-
trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+ rcu_read_lock();
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+ rcu_read_unlock();
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}
@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
if (tls_ctx->rx_conf != TLS_HW)
return;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+ return;
prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
ctx->sw.decrypted |= is_decrypted;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+ if (likely(is_encrypted || is_decrypted))
+ return 0;
+
+ /* After tls_device_down disables the offload, the next SKB will
+ * likely have initial fragments decrypted, and final ones not
+ * decrypted. We need to reencrypt that single SKB.
+ */
+ return tls_device_reencrypt(sk, skb);
+ }
+
/* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev)
spin_unlock_irqrestore(&tls_device_lock, flags);
list_for_each_entry_safe(ctx, tmp, &list, list) {
+ /* Stop offloaded TX and switch to the fallback.
+ * tls_is_sk_tx_device_offloaded will return false.
+ */
+ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+ /* Stop the RX and TX resync.
+ * tls_dev_resync must not be called after tls_dev_del.
+ */
+ WRITE_ONCE(ctx->netdev, NULL);
+
+ /* Start skipping the RX resync logic completely. */
+ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+ /* Sync with inflight packets. After this point:
+ * TX: no non-encrypted packets will be passed to the driver.
+ * RX: resync requests from the driver will be ignored.
+ */
+ synchronize_net();
+
+ /* Release the offload context on the driver side. */
if (ctx->tx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_TX);
@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev)
!test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
- WRITE_ONCE(ctx->netdev, NULL);
- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
- usleep_range(10, 200);
+
dev_put(netdev);
- list_del_init(&ctx->list);
- if (refcount_dec_and_test(&ctx->refcount))
- tls_device_free_ctx(ctx);
+ /* Move the context to a separate list for two reasons:
+ * 1. When the context is deallocated, list_del is called.
+ * 2. It's no longer an offloaded context, so we don't want to
+ * run offload-specific code on this context.
+ */
+ spin_lock_irqsave(&tls_device_lock, flags);
+ list_move_tail(&ctx->list, &tls_device_down_list);
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ /* Device contexts for RX and TX will be freed on sk_destruct
+ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+ */
}
up_write(&device_offload_lock);
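The tls_device.c changes above mark the context degraded when the device goes away; after that, tls_device_decrypted() only has to fix up the one record that straddled the switch (partly decrypted by hardware, partly not). A small stand-alone sketch of that decision, with invented names:

#include <stdbool.h>
#include <stdio.h>

static const char *degraded_action(bool is_decrypted, bool is_encrypted)
{
	if (is_decrypted || is_encrypted)
		return "pass through";		/* record is uniform, nothing to do */
	return "reencrypt in software";		/* mixed record that straddled the switch */
}

int main(void)
{
	printf("fully decrypted:  %s\n", degraded_action(true, false));
	printf("fully encrypted:  %s\n", degraded_action(false, true));
	printf("partly decrypted: %s\n", degraded_action(false, false));
	return 0;
}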
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index cacf040872c7..e40bedd112b6 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return tls_sw_fallback(sk, skb);
+}
+
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
return tls_sw_fallback(skb->sk, skb);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 47b7c5334c34..fde56ff49163 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
mutex_init(&ctx->tx_lock);
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ ctx->sk = sk;
return ctx;
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 1dcb34dfd56b..694de024d0ee 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -37,6 +37,7 @@
#include <linux/sched/signal.h>
#include <linux/module.h>
+#include <linux/splice.h>
#include <crypto/aead.h>
#include <net/strparser.h>
@@ -1281,7 +1282,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
}
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
- int flags, long timeo, int *err)
+ bool nonblock, long timeo, int *err)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -1306,7 +1307,7 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
if (sock_flag(sk, SOCK_DONE))
return NULL;
- if ((flags & MSG_DONTWAIT) || !timeo) {
+ if (nonblock || !timeo) {
*err = -EAGAIN;
return NULL;
}
@@ -1786,7 +1787,7 @@ int tls_sw_recvmsg(struct sock *sk,
bool async_capable;
bool async = false;
- skb = tls_wait_data(sk, psock, flags, timeo, &err);
+ skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
if (!skb) {
if (psock) {
int ret = sk_msg_recvmsg(sk, psock, msg, len,
@@ -1990,9 +1991,9 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
- skb = tls_wait_data(sk, NULL, flags, timeo, &err);
+ skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
if (!skb)
goto splice_read_end;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5a31307ceb76..5d1192ceb139 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
+
+ skpair = unix_peer(sk);
+ unix_peer(sk) = NULL;
+
unix_state_unlock(sk);
wake_up_interruptible_all(&u->peer_wait);
- skpair = unix_peer(sk);
-
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
- unix_peer(sk) = NULL;
}
/* Try to flush out this socket. Throw out buffers at least */
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 2eee93985ab0..af590ae606b6 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
@$(kecho) " GEN $@"
@(echo '#include "reg.h"'; \
echo 'const u8 shipped_regdb_certs[] = {'; \
- cat $^ ; \
+ echo | cat - $^ ; \
echo '};'; \
echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
) > $@
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6fbf7537faf5..8d0883e81093 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1340,6 +1340,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
rdev->devlist_generation++;
wdev->registered = true;
+ if (wdev->netdev &&
+ sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
+ "phy80211"))
+ pr_err("failed to add phy80211 symlink to netdev!\n");
+
nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
}
@@ -1365,14 +1370,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
if (ret)
goto out;
- if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
- "phy80211")) {
- pr_err("failed to add phy80211 symlink to netdev!\n");
- unregister_netdevice(dev);
- ret = -EINVAL;
- goto out;
- }
-
cfg80211_register_wdev(rdev, wdev);
ret = 0;
out:
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index 6bdd96408022..d245968b74cb 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -334,6 +334,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
struct sk_buff *msg;
void *hdr;
@@ -364,9 +365,20 @@ free_msg:
nlmsg_free(msg);
free_request:
spin_lock_bh(&wdev->pmsr_lock);
- list_del(&req->list);
+ /*
+ * cfg80211_pmsr_process_abort() may have already moved this request
+ * to the free list, and will free it later. In this case, don't free
+ * it here.
+ */
+ list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
+ if (tmp == req) {
+ list_del(&req->list);
+ to_free = req;
+ break;
+ }
+ }
spin_unlock_bh(&wdev->pmsr_lock);
- kfree(req);
+ kfree(to_free);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9b959e3b09c6..0c3f05c9be27 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
wiphy_unlock(&rdev->wiphy);
+
+ if (ret)
+ cfg80211_shutdown_all_interfaces(&rdev->wiphy);
+
rtnl_unlock();
return ret;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 382c5262d997..18dba3d7c638 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype,
- u8 data_offset)
+ u8 data_offset, bool is_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -629,7 +629,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
tmp.h_proto = payload.proto;
- if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+ if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
tmp.h_proto != htons(ETH_P_AARP) &&
tmp.h_proto != htons(ETH_P_IPX)) ||
ether_addr_equal(payload.hdr, bridge_tunnel_header)))
@@ -771,6 +771,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
remaining = skb->len - offset;
if (subframe_len > remaining)
goto purge;
+ /* mitigate A-MSDU aggregation injection attacks */
+ if (ether_addr_equal(eth.h_dest, rfc1042_header))
+ goto purge;
offset += sizeof(struct ethhdr);
last = remaining <= subframe_len + padding;
@@ -1056,6 +1059,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_MESH_POINT:
/* mesh should be handled? */
break;
+ case NL80211_IFTYPE_OCB:
+ cfg80211_leave_ocb(rdev, dev);
+ break;
default:
break;
}
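The ieee80211_amsdu_to_8023s() hunk above drops an A-MSDU whose subframe destination address equals the RFC 1042 LLC/SNAP header, the signature of an aggregation injection attack. A stand-alone sketch of that 6-byte comparison (illustrative, not the cfg80211 code):

#include <stdio.h>
#include <string.h>

static const unsigned char rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

static int subframe_ok(const unsigned char dest[6])
{
	/* a destination equal to the LLC/SNAP header marks an injected subframe */
	return memcmp(dest, rfc1042_header, 6) != 0;
}

int main(void)
{
	unsigned char spoofed[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	unsigned char normal[6]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("spoofed dest accepted: %d\n", subframe_ok(spoofed));
	printf("normal dest accepted:  %d\n", subframe_ok(normal));
	return 0;
}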
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 44d6566dd23e..1816899499ce 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -536,7 +536,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
if (protocol)
goto out;
- rc = -ENOBUFS;
+ rc = -ENOMEM;
if ((sk = x25_alloc_socket(net, kern)) == NULL)
goto out;
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index aa696854be78..53e300f860bb 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -1255,7 +1255,7 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
for (i = 0; i < batch_size; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
idx + i);
- tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
+ tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
tx_desc->len = PKT_SIZE;
}
diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
index 21dbf63d6e41..9ec93d90e8a5 100644
--- a/samples/vfio-mdev/mdpy-fb.c
+++ b/samples/vfio-mdev/mdpy-fb.c
@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
if (format != DRM_FORMAT_XRGB8888) {
pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
format, DRM_FORMAT_XRGB8888);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_release_regions;
}
if (width < 100 || width > 10000) {
pci_err(pdev, "width (%d) out of range\n", width);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_release_regions;
}
if (height < 100 || height > 10000) {
pci_err(pdev, "height (%d) out of range\n", height);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_release_regions;
}
pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
width, height);
info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
- if (!info)
+ if (!info) {
+ ret = -ENOMEM;
goto err_release_regions;
+ }
pci_set_drvdata(pdev, info);
par = info->par;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 949f723efe53..34d257653fb4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -268,7 +268,8 @@ define rule_as_o_S
endef
# Built-in and composite module parts
-$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
+.SECONDEXPANSION:
+$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
$(call if_changed_rule,cc_o_c)
$(call cmd,force_checksrc)
@@ -349,7 +350,7 @@ cmd_modversions_S = \
fi
endif
-$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
+$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
$(call if_changed_rule,as_o_S)
targets += $(filter-out $(subdir-builtin), $(real-obj-y))
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index dd87cea9fba7..a7883e455290 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M] $@
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
if [ -f vmlinux ]; then \
- LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \
+ LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
else \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
fi;
diff --git a/scripts/atomic/check-atomics.sh b/scripts/atomic/check-atomics.sh
index 82748d42ecc5..9c7fbd4bcbce 100755
--- a/scripts/atomic/check-atomics.sh
+++ b/scripts/atomic/check-atomics.sh
@@ -17,7 +17,6 @@ cat <<EOF |
asm-generic/atomic-instrumented.h
asm-generic/atomic-long.h
linux/atomic-arch-fallback.h
-linux/atomic-fallback.h
EOF
while read header; do
OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})"
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index 5766ffcec7c5..b0c45aee19d7 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -41,34 +41,6 @@ gen_params_checks()
done
}
-# gen_guard(meta, atomic, pfx, name, sfx, order)
-gen_guard()
-{
- local meta="$1"; shift
- local atomic="$1"; shift
- local pfx="$1"; shift
- local name="$1"; shift
- local sfx="$1"; shift
- local order="$1"; shift
-
- local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
-
- local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
-
- # We definitely need a preprocessor symbol for this atomic if it is an
- # ordering variant, or if there's a generic fallback.
- if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then
- printf "defined(${atomicname})"
- return
- fi
-
- # If this is a base variant, but a relaxed variant *may* exist, then we
- # only have a preprocessor symbol if the relaxed variant isn't defined
- if meta_has_relaxed "${meta}"; then
- printf "!defined(${atomicname}_relaxed) || defined(${atomicname})"
- fi
-}
-
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
gen_proto_order_variant()
{
@@ -82,16 +54,12 @@ gen_proto_order_variant()
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
- local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"
-
local ret="$(gen_ret_type "${meta}" "${int}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local checks="$(gen_params_checks "${meta}" "$@")"
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
- [ ! -z "${guard}" ] && printf "#if ${guard}\n"
-
cat <<EOF
static __always_inline ${ret}
${atomicname}(${params})
@@ -99,11 +67,8 @@ ${atomicname}(${params})
${checks}
${retstmt}arch_${atomicname}(${args});
}
-#define ${atomicname} ${atomicname}
EOF
- [ ! -z "${guard}" ] && printf "#endif\n"
-
printf "\n"
}
@@ -139,19 +104,6 @@ EOF
fi
}
-gen_optional_xchg()
-{
- local name="$1"; shift
- local sfx="$1"; shift
- local guard="defined(arch_${name}${sfx})"
-
- [ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"
-
- printf "#if ${guard}\n"
- gen_xchg "${name}${sfx}" ""
- printf "#endif\n\n"
-}
-
cat << EOF
// SPDX-License-Identifier: GPL-2.0
@@ -188,7 +140,8 @@ done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
for order in "" "_acquire" "_release" "_relaxed"; do
- gen_optional_xchg "${xchg}" "${order}"
+ gen_xchg "${xchg}${order}" ""
+ printf "\n"
done
done
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
index d29e159ef489..f776a574224d 100755
--- a/scripts/atomic/gen-atomics.sh
+++ b/scripts/atomic/gen-atomics.sh
@@ -11,7 +11,6 @@ cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_
-gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
while read script header args; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
index f6d543725f1e..b2483149bbe5 100755
--- a/scripts/dummy-tools/gcc
+++ b/scripts/dummy-tools/gcc
@@ -76,7 +76,11 @@ fi
if arg_contain -S "$@"; then
# For scripts/gcc-x86-*-has-stack-protector.sh
if arg_contain -fstack-protector "$@"; then
- echo "%gs"
+ if arg_contain -mstack-protector-guard-reg=fs "$@"; then
+ echo "%fs"
+ else
+ echo "%gs"
+ fi
exit 0
fi
diff --git a/scripts/jobserver-exec b/scripts/jobserver-exec
index 48d141e3ec56..8762887a970c 100755
--- a/scripts/jobserver-exec
+++ b/scripts/jobserver-exec
@@ -10,7 +10,7 @@ from __future__ import print_function
import os, sys, errno
import subprocess
-# Extract and prepare jobserver file descriptors from envirnoment.
+# Extract and prepare jobserver file descriptors from environment.
claim = 0
jobs = b""
try:
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index f4de4c97015b..0e0f6466b18d 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -240,7 +240,7 @@ gen_btf()
fi
info "BTF" ${2}
- LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${extra_paholeopt} ${1}
+ LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1}
# Create ${2} which contains just .BTF section but no symbols. Add
# SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index f9b19524da11..1e9baa5c4fc6 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
Elf32_Word const *symtab_shndx)
{
unsigned long offset;
+ unsigned short shndx = w2(sym->st_shndx);
int index;
- if (sym->st_shndx != SHN_XINDEX)
- return w2(sym->st_shndx);
+ if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
+ return shndx;
- offset = (unsigned long)sym - (unsigned long)symtab;
- index = offset / sizeof(*sym);
+ if (shndx == SHN_XINDEX) {
+ offset = (unsigned long)sym - (unsigned long)symtab;
+ index = offset / sizeof(*sym);
- return w(symtab_shndx[index]);
+ return w(symtab_shndx[index]);
+ }
+
+ return 0;
}
static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
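The recordmcount.h fix above stops treating every reserved st_shndx value as an extended index: ordinary indices are returned as-is, SHN_XINDEX is resolved through the symtab_shndx table, and the remaining reserved values map to section 0. A stand-alone sketch of that decision, assuming the ELF constants below:

#include <stdint.h>
#include <stdio.h>

#define SHN_UNDEF	0
#define SHN_LORESERVE	0xff00
#define SHN_XINDEX	0xffff

static unsigned int sym_index(uint16_t st_shndx, unsigned int extended)
{
	if (st_shndx > SHN_UNDEF && st_shndx < SHN_LORESERVE)
		return st_shndx;		/* ordinary section index */
	if (st_shndx == SHN_XINDEX)
		return extended;		/* value from the symtab_shndx table */
	return 0;				/* SHN_UNDEF, SHN_ABS, SHN_COMMON, ... */
}

int main(void)
{
	printf("%u\n", sym_index(5, 0));		/* 5 */
	printf("%u\n", sym_index(SHN_XINDEX, 70000));	/* 70000 */
	printf("%u\n", sym_index(0xfff1, 0));		/* SHN_ABS resolves to 0 */
	return 0;
}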
diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
index 469394550801..aa108bea6739 100644
--- a/security/keys/trusted-keys/trusted_tpm1.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -493,10 +493,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
if (ret < 0)
- return ret;
+ goto out;
- if (ret != TPM_NONCE_SIZE)
- return -EIO;
+ if (ret != TPM_NONCE_SIZE) {
+ ret = -EIO;
+ goto out;
+ }
ordinal = htonl(TPM_ORD_SEAL);
datsize = htonl(datalen);
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
index 617fabd4d913..0165da386289 100644
--- a/security/keys/trusted-keys/trusted_tpm2.c
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -336,9 +336,9 @@ out:
rc = -EPERM;
}
if (blob_len < 0)
- return blob_len;
-
- payload->blob_len = blob_len;
+ rc = blob_len;
+ else
+ payload->blob_len = blob_len;
tpm_put_ops(chip);
return rc;
diff --git a/sound/core/control_led.c b/sound/core/control_led.c
index 25f57c14f294..a90e31dbde61 100644
--- a/sound/core/control_led.c
+++ b/sound/core/control_led.c
@@ -17,6 +17,9 @@ MODULE_LICENSE("GPL");
#define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \
>> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1)
+#define to_led_card_dev(_dev) \
+ container_of(_dev, struct snd_ctl_led_card, dev)
+
enum snd_ctl_led_mode {
MODE_FOLLOW_MUTE = 0,
MODE_FOLLOW_ROUTE,
@@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card)
snd_ctl_led_refresh();
}
+static void snd_ctl_led_card_release(struct device *dev)
+{
+ struct snd_ctl_led_card *led_card = to_led_card_dev(dev);
+
+ kfree(led_card);
+}
+
+static void snd_ctl_led_release(struct device *dev)
+{
+}
+
+static void snd_ctl_led_dev_release(struct device *dev)
+{
+}
+
/*
* sysfs
*/
@@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
led_card->number = card->number;
led_card->led = led;
device_initialize(&led_card->dev);
+ led_card->dev.release = snd_ctl_led_card_release;
if (dev_set_name(&led_card->dev, "card%d", card->number) < 0)
goto cerr;
led_card->dev.parent = &led->dev;
@@ -681,7 +700,6 @@ cerr:
put_device(&led_card->dev);
cerr2:
printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number);
- kfree(led_card);
}
}
@@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card)
snprintf(link_name, sizeof(link_name), "led-%s", led->name);
sysfs_remove_link(&card->ctl_dev.kobj, link_name);
sysfs_remove_link(&led_card->dev.kobj, "card");
- device_del(&led_card->dev);
- kfree(led_card);
+ device_unregister(&led_card->dev);
led->cards[card->number] = NULL;
}
}
@@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void)
device_initialize(&snd_ctl_led_dev);
snd_ctl_led_dev.class = sound_class;
+ snd_ctl_led_dev.release = snd_ctl_led_dev_release;
dev_set_name(&snd_ctl_led_dev, "ctl-led");
if (device_add(&snd_ctl_led_dev)) {
put_device(&snd_ctl_led_dev);
@@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void)
INIT_LIST_HEAD(&led->controls);
device_initialize(&led->dev);
led->dev.parent = &snd_ctl_led_dev;
+ led->dev.release = snd_ctl_led_release;
led->dev.groups = snd_ctl_led_dev_attr_groups;
dev_set_name(&led->dev, led->name);
if (device_add(&led->dev)) {
put_device(&led->dev);
for (; group > 0; group--) {
led = &snd_ctl_leds[group - 1];
- device_del(&led->dev);
+ device_unregister(&led->dev);
}
- device_del(&snd_ctl_led_dev);
+ device_unregister(&snd_ctl_led_dev);
return -ENOMEM;
}
}
@@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void)
}
for (group = 0; group < MAX_LED; group++) {
led = &snd_ctl_leds[group];
- device_del(&led->dev);
+ device_unregister(&led->dev);
}
- device_del(&snd_ctl_led_dev);
+ device_unregister(&snd_ctl_led_dev);
snd_ctl_led_clean(NULL);
}
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 1645e4142e30..9863be6fd43e 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
return err;
}
spin_lock_irq(&tmr->lock);
- tmr->timeri = t;
+ if (tmr->timeri)
+ err = -EBUSY;
+ else
+ tmr->timeri = t;
spin_unlock_irq(&tmr->lock);
+ if (err < 0) {
+ snd_timer_close(t);
+ snd_timer_instance_free(t);
+ return err;
+ }
return 0;
}
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 6898b1ac0d7f..92b7008fcdb8 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
return;
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
return;
+ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
- ts->ccallback(ts, event + 100, &tstamp, resolution);
+ ts->ccallback(ts, event, &tstamp, resolution);
}
/* start/continue a master timer */
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 25778765cbfe..9897bd26a438 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -38,7 +38,7 @@ config SND_OXFW
* Mackie(Loud) Onyx 1640i (former model)
* Mackie(Loud) Onyx Satellite
* Mackie(Loud) Tapco Link.Firewire
- * Mackie(Loud) d.2 pro/d.4 pro
+ * Mackie(Loud) d.4 pro
* Mackie(Loud) U.420/U.420d
* TASCAM FireOne
* Stanton Controllers & Systems 1 Deck/Mixer
@@ -84,7 +84,7 @@ config SND_BEBOB
* PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
* BridgeCo RDAudio1/Audio5
* Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
- * Mackie d.2 (FireWire Option)
+ * Mackie d.2 (FireWire Option) and d.2 Pro
* Stanton FinalScratch 2 (ScratchAmp)
* Tascam IF-FW/DM
* Behringer XENIX UFX 1204/1604
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index 26e7cb555d3c..aa53c13b89d3 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -14,8 +14,8 @@
#include <linux/tracepoint.h>
TRACE_EVENT(amdtp_packet,
- TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
- TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
+ TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
+ TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
TP_STRUCT__entry(
__field(unsigned int, second)
__field(unsigned int, cycle)
@@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
__entry->payload_quadlets = payload_length / sizeof(__be32);
__entry->data_blocks = data_blocks;
__entry->data_block_counter = data_block_counter,
- __entry->packet_index = s->packet_index;
+ __entry->packet_index = packet_index;
__entry->irq = !!in_interrupt();
__entry->index = index;
),
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 4e2f2bb7879f..5805c5de39fb 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
}
trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
- data_block_counter, index);
+ data_block_counter, s->packet_index, index);
}
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
@@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
unsigned int *payload_length,
unsigned int *data_blocks,
unsigned int *data_block_counter,
- unsigned int *syt, unsigned int index)
+ unsigned int *syt, unsigned int packet_index, unsigned int index)
{
const __be32 *cip_header;
+ unsigned int cip_header_size;
int err;
*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
- if (*payload_length > s->ctx_data.tx.ctx_header_size +
- s->ctx_data.tx.max_ctx_payload_length) {
+
+ if (!(s->flags & CIP_NO_HEADER))
+ cip_header_size = 8;
+ else
+ cip_header_size = 0;
+
+ if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
dev_err(&s->unit->device,
"Detect jumbo payload: %04x %04x\n",
- *payload_length, s->ctx_data.tx.max_ctx_payload_length);
+ *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
return -EIO;
}
- if (!(s->flags & CIP_NO_HEADER)) {
+ if (cip_header_size > 0) {
cip_header = ctx_header + 2;
err = check_cip_header(s, cip_header, *payload_length,
data_blocks, data_block_counter, syt);
@@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
}
trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
- *data_block_counter, index);
+ *data_block_counter, packet_index, index);
return err;
}
@@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
unsigned int packets)
{
unsigned int dbc = s->data_block_counter;
+ unsigned int packet_index = s->packet_index;
+ unsigned int queue_size = s->queue_size;
int i;
int err;
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % s->queue_size;
unsigned int cycle;
unsigned int payload_length;
unsigned int data_blocks;
@@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
cycle = compute_cycle_count(ctx_header[1]);
err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
- &data_blocks, &dbc, &syt, i);
+ &data_blocks, &dbc, &syt, packet_index, i);
if (err < 0)
return err;
@@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
desc->syt = syt;
desc->data_blocks = data_blocks;
desc->data_block_counter = dbc;
- desc->ctx_payload = s->buffer.packets[index].buffer;
+ desc->ctx_payload = s->buffer.packets[packet_index].buffer;
if (!(s->flags & CIP_DBC_IS_END_EVENT))
dbc = (dbc + desc->data_blocks) & 0xff;
ctx_header +=
s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
+
+ packet_index = (packet_index + 1) % queue_size;
}
s->data_block_counter = dbc;
@@ -795,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
static inline void cancel_stream(struct amdtp_stream *s)
{
s->packet_index = -1;
- if (current_work() == &s->period_work)
+ if (in_interrupt())
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
@@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
s->data_block_counter = 0;
}
- /* initialize packet buffer */
+ // initialize packet buffer.
+ max_ctx_payload_size = amdtp_stream_get_max_payload(s);
if (s->direction == AMDTP_IN_STREAM) {
dir = DMA_FROM_DEVICE;
type = FW_ISO_CONTEXT_RECEIVE;
- if (!(s->flags & CIP_NO_HEADER))
+ if (!(s->flags & CIP_NO_HEADER)) {
+ max_ctx_payload_size -= 8;
ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
- else
+ } else {
ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
-
- max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
- ctx_header_size;
+ }
} else {
dir = DMA_TO_DEVICE;
type = FW_ISO_CONTEXT_TRANSMIT;
ctx_header_size = 0; // No effect for IT context.
- max_ctx_payload_size = amdtp_stream_get_max_payload(s);
if (!(s->flags & CIP_NO_HEADER))
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 2c8e3392a490..daeecfa8b9aa 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
/* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
- /* Mackie, d.2 (Firewire Option) */
+ // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
/* Stanton, ScratchAmp */
SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
index 0916864511d5..27c13b9cc9ef 100644
--- a/sound/firewire/dice/dice-alesis.c
+++ b/sound/firewire/dice/dice-alesis.c
@@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
static const unsigned int
alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
{10, 10, 4}, /* Tx0 = Analog + S/PDIF. */
- {16, 8, 0}, /* Tx1 = ADAT1 + ADAT2. */
+ {16, 4, 0}, /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
};
int snd_dice_detect_alesis_formats(struct snd_dice *dice)
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index af8a90ee40f3..a69ca1111b03 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -218,7 +218,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (frames_per_period > 0) {
// For double_pcm_frame quirk.
- if (rate > 96000) {
+ if (rate > 96000 && !dice->disable_double_pcm_frames) {
frames_per_period *= 2;
frames_per_buffer *= 2;
}
@@ -273,7 +273,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
mutex_lock(&dice->mutex);
// For double_pcm_frame quirk.
- if (rate > 96000) {
+ if (rate > 96000 && !dice->disable_double_pcm_frames) {
events_per_period /= 2;
events_per_buffer /= 2;
}
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 1a14c083e8ce..c4dfe76500c2 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -181,7 +181,7 @@ static int keep_resources(struct snd_dice *dice, struct amdtp_stream *stream,
// as 'Dual Wire'.
// For this quirk, blocking mode is required and PCM buffer size should
// be aligned to SYT_INTERVAL.
- double_pcm_frames = rate > 96000;
+ double_pcm_frames = (rate > 96000 && !dice->disable_double_pcm_frames);
if (double_pcm_frames) {
rate /= 2;
pcm_chs *= 2;
diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
index a8875d24ba2a..43a3bcb15b3d 100644
--- a/sound/firewire/dice/dice-tcelectronic.c
+++ b/sound/firewire/dice/dice-tcelectronic.c
@@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
};
static const struct dice_tc_spec konnekt_live = {
- .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
- .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
+ .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
+ .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
.has_midi = true,
};
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 107a81691f0e..239d164b0eea 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -21,6 +21,7 @@ MODULE_LICENSE("GPL v2");
#define OUI_SSL 0x0050c2 // Actually ID reserved by IEEE.
#define OUI_PRESONUS 0x000a92
#define OUI_HARMAN 0x000fd7
+#define OUI_AVID 0x00a07e
#define DICE_CATEGORY_ID 0x04
#define WEISS_CATEGORY_ID 0x00
@@ -222,6 +223,14 @@ static int dice_probe(struct fw_unit *unit,
(snd_dice_detect_formats_t)entry->driver_data;
}
+ // Below models are compliant to IEC 61883-1/6 and have no quirk at high sampling transfer
+ // frequency.
+ // * Avid M-Box 3 Pro
+ // * M-Audio Profire 610
+ // * M-Audio Profire 2626
+ if (entry->vendor_id == OUI_MAUDIO || entry->vendor_id == OUI_AVID)
+ dice->disable_double_pcm_frames = true;
+
spin_lock_init(&dice->lock);
mutex_init(&dice->mutex);
init_completion(&dice->clock_accepted);
@@ -278,7 +287,22 @@ static void dice_bus_reset(struct fw_unit *unit)
#define DICE_INTERFACE 0x000001
+#define DICE_DEV_ENTRY_TYPICAL(vendor, model, data) \
+ { \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+ IEEE1394_MATCH_MODEL_ID | \
+ IEEE1394_MATCH_SPECIFIER_ID | \
+ IEEE1394_MATCH_VERSION, \
+ .vendor_id = (vendor), \
+ .model_id = (model), \
+ .specifier_id = (vendor), \
+ .version = DICE_INTERFACE, \
+ .driver_data = (kernel_ulong_t)(data), \
+ }
+
static const struct ieee1394_device_id dice_id_table[] = {
+ // Avid M-Box 3 Pro. To match in probe function.
+ DICE_DEV_ENTRY_TYPICAL(OUI_AVID, 0x000004, snd_dice_detect_extension_formats),
/* M-Audio Profire 2626 has a different value in version field. */
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
index adc6f7c84460..3c967d1b3605 100644
--- a/sound/firewire/dice/dice.h
+++ b/sound/firewire/dice/dice.h
@@ -109,7 +109,8 @@ struct snd_dice {
struct fw_iso_resources rx_resources[MAX_STREAMS];
struct amdtp_stream tx_stream[MAX_STREAMS];
struct amdtp_stream rx_stream[MAX_STREAMS];
- bool global_enabled;
+ bool global_enabled:1;
+ bool disable_double_pcm_frames:1;
struct completion clock_accepted;
unsigned int substreams_counter;
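Turning global_enabled into a 1-bit bitfield alongside the new disable_double_pcm_frames flag keeps the boolean state packed as flags accumulate in struct snd_dice. A toy, compiler-dependent illustration (sizes vary with the ABI; not part of the patch):

#include <stdbool.h>
#include <stdio.h>

struct flags_plain  { bool global_enabled;   bool disable_double_pcm_frames;   };
struct flags_packed { bool global_enabled:1; bool disable_double_pcm_frames:1; };

int main(void)
{
	printf("plain: %zu byte(s), packed: %zu byte(s)\n",
	       sizeof(struct flags_plain), sizeof(struct flags_packed));
	return 0;
}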
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 1f1e3236efb8..9eea25c46dc7 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
* Onyx-i series (former models): 0x081216
* Mackie Onyx Satellite: 0x00200f
* Tapco LINK.firewire 4x6: 0x000460
- * d.2 pro: Unknown
* d.4 pro: Unknown
* U.420: Unknown
* U.420d: Unknown
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index ab5ff7867eb9..d8be146793ee 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x51c8,
},
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51cc,
+ },
#endif
};
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index afc088f0377c..b7518122a10d 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -77,17 +77,8 @@ static const struct snd_kcontrol_new snd_gus_joystick_control = {
static void snd_gus_init_control(struct snd_gus_card *gus)
{
- int ret;
-
- if (!gus->ace_flag) {
- ret =
- snd_ctl_add(gus->card,
- snd_ctl_new1(&snd_gus_joystick_control,
- gus));
- if (ret)
- snd_printk(KERN_ERR "gus: snd_ctl_add failed: %d\n",
- ret);
- }
+ if (!gus->ace_flag)
+ snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus));
}
/*
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 38dc1fde25f3..aa4870531023 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -846,14 +846,10 @@ int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops);
- if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) {
- err = snd_ctl_add(card, snd_ctl_new1(
- &snd_sb16_dma_control, chip));
- if (err)
- return err;
- } else {
+ if (chip->dma16 >= 0 && chip->dma8 != chip->dma16)
+ snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip));
+ else
pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
- }
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev, 64*1024, 128*1024);
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index 6c9d534ce8b6..ed3a87ebe3f4 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -93,12 +93,12 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
acard = card->private_data;
card->private_free = snd_sb8_free;
- /* block the 0x388 port to avoid PnP conflicts */
+ /*
+ * Block the 0x388 port to avoid PnP conflicts.
+ * No need to check this value after request_region,
+ * as we never do anything with it.
+ */
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
- if (!acard->fm_res) {
- err = -EBUSY;
- goto _err;
- }
if (port[dev] != SNDRV_AUTO_PORT) {
if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a31009afc025..5462f771c2f9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int hda_codec_pm_prepare(struct device *dev)
{
+ dev->power.power_state = PMSG_SUSPEND;
return pm_runtime_suspended(dev);
}
@@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
+ /* If no other pm-functions are called between prepare() and complete() */
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND)
+ dev->power.power_state = PMSG_RESUME;
+
if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
hda_codec_need_resume(codec) || codec->forced_resume))
pm_request_resume(dev);
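The prepare() callback now records PMSG_SUSPEND in dev->power.power_state before reporting whether the codec is runtime-suspended (the direct-complete optimization), and complete() restores PMSG_RESUME only if nothing else touched that state in between, before deciding whether to schedule a resume. A toy model of the bookkeeping, with simplified types and illustrative event values (not the real dev_pm_info layout):

#include <stdbool.h>
#include <stdio.h>

enum { EVENT_ON, EVENT_SUSPEND, EVENT_RESUME };

struct toy_dev {
	int power_event;
	bool runtime_suspended;
};

static int toy_prepare(struct toy_dev *d)
{
	d->power_event = EVENT_SUSPEND;	/* mark that a system suspend has started */
	return d->runtime_suspended;	/* non-zero lets the core skip suspend/resume */
}

static void toy_complete(struct toy_dev *d)
{
	/* undo the marker only if no other PM callback ran in between */
	if (d->power_event == EVENT_SUSPEND)
		d->power_event = EVENT_RESUME;
}

int main(void)
{
	struct toy_dev d = { EVENT_ON, true };

	printf("direct-complete allowed: %d\n", toy_prepare(&d));
	toy_complete(&d);
	printf("event after complete: %d\n", d.power_event);
	return 0;
}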
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b638fc2ef6f7..1f8018f9ce57 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new cap_sw_temp = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Switch",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = cap_sw_info,
.get = cap_sw_get,
.put = cap_sw_put,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 79ade335c8a0..470753b36c8a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
/* Alderlake-P */
{ PCI_DEVICE(0x8086, 0x51c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Alderlake-M */
+ { PCI_DEVICE(0x8086, 0x51cc),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Elkhart Lake */
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 726507d0b04c..8629e84fef23 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec,
break;
case HDA_FIXUP_ACT_PROBE:
- /* Set initial volume on Bullseye to -26 dB */
- if (codec->fixup_id == CS8409_BULLSEYE)
- snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
- HDA_INPUT, 0, 0xff, 0x19);
+ /* Set initial DMIC volume to -26 dB */
+ snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
+ HDA_INPUT, 0, 0xff, 0x19);
snd_hda_gen_add_kctl(&spec->gen,
NULL, &cs8409_cs42l42_hp_volume_mixer);
snd_hda_gen_add_kctl(&spec->gen,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6d58f24c9702..ab5113cccffa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0282:
case 0x10ec0283:
case 0x10ec0286:
- case 0x10ec0287:
case 0x10ec0288:
case 0x10ec0285:
case 0x10ec0298:
@@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0275:
alc_update_coef_idx(codec, 0xe, 0, 1<<0);
break;
+ case 0x10ec0287:
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ alc_write_coef_idx(codec, 0x8, 0x4ab7);
+ break;
case 0x10ec0293:
alc_update_coef_idx(codec, 0xa, 1<<13, 0);
break;
@@ -2600,6 +2603,28 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
{}
};
+static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950,
+ {0x14, 0x01014010},
+ {0x15, 0x01011012},
+ {0x16, 0x01016011},
+ {0x18, 0x01a19040},
+ {0x19, 0x02a19050},
+ {0x1a, 0x0181304f},
+ {0x1b, 0x0221401f},
+ {0x1e, 0x01456130}),
+ SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950,
+ {0x14, 0x01015010},
+ {0x15, 0x01011012},
+ {0x16, 0x01011011},
+ {0x18, 0x01a11040},
+ {0x19, 0x02a19050},
+ {0x1a, 0x0181104f},
+ {0x1b, 0x0221401f},
+ {0x1e, 0x01451130}),
+ {}
+};
+
/*
* BIOS auto configuration
*/
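SND_HDA_PIN_QUIRK() entries match on the codec ID, the subsystem vendor and the set of default pin configurations rather than on a full PCI SSID, which is how the two ALC1220 boards above are told apart. A toy matcher showing the idea, with made-up pin values (not the driver's actual lookup code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pincfg { unsigned char nid; unsigned int cfg; };

/* every pin listed in the quirk must be present with the same default config */
static bool pins_match(const struct pincfg *quirk, size_t nquirk,
		       const struct pincfg *codec, size_t ncodec)
{
	for (size_t i = 0; i < nquirk; i++) {
		bool found = false;

		for (size_t j = 0; j < ncodec; j++) {
			if (codec[j].nid == quirk[i].nid &&
			    codec[j].cfg == quirk[i].cfg) {
				found = true;
				break;
			}
		}
		if (!found)
			return false;
	}
	return true;
}

int main(void)
{
	static const struct pincfg quirk[] = { {0x14, 0x01014010}, {0x1b, 0x0221401f} };
	static const struct pincfg codec[] = { {0x14, 0x01014010}, {0x1b, 0x0221401f},
					       {0x1e, 0x01456130} };

	printf("match: %d\n", pins_match(quirk, 2, codec, 3));
	return 0;
}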
@@ -2641,6 +2666,7 @@ static int patch_alc882(struct hda_codec *codec)
snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
alc882_fixups);
+ snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
alc_auto_parse_customize_define(codec);
@@ -6251,6 +6277,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
}
}
+static void alc294_gu502_toggle_output(struct hda_codec *codec,
+ struct hda_jack_callback *cb)
+{
+	/* Windows sets 0x10 to 0x8420 for Node 0x20, which is
+	 * responsible for switching between speakers and headphones
+	 */
+ if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
+ alc_write_coef_idx(codec, 0x10, 0x8420);
+ else
+ alc_write_coef_idx(codec, 0x10, 0x0a20);
+}
+
+static void alc294_fixup_gu502_hp(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (!is_jack_detectable(codec, 0x21))
+ return;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_jack_detect_enable_callback(codec, 0x21,
+ alc294_gu502_toggle_output);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ alc294_gu502_toggle_output(codec, NULL);
+ break;
+ }
+}
+
static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -6468,6 +6523,9 @@ enum {
ALC294_FIXUP_ASUS_GX502_HP,
ALC294_FIXUP_ASUS_GX502_PINS,
ALC294_FIXUP_ASUS_GX502_VERBS,
+ ALC294_FIXUP_ASUS_GU502_HP,
+ ALC294_FIXUP_ASUS_GU502_PINS,
+ ALC294_FIXUP_ASUS_GU502_VERBS,
ALC285_FIXUP_HP_GPIO_LED,
ALC285_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_GPIO_LED,
@@ -6507,6 +6565,10 @@ enum {
ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST,
ALC295_FIXUP_ASUS_DACS,
ALC295_FIXUP_HP_OMEN,
+ ALC285_FIXUP_HP_SPECTRE_X360,
+ ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
+ ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+ ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -7709,6 +7771,35 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc294_fixup_gx502_hp,
},
+ [ALC294_FIXUP_ASUS_GU502_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a11050 }, /* rear HP mic */
+ { 0x1a, 0x01a11830 }, /* rear external mic */
+ { 0x21, 0x012110f0 }, /* rear HP out */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
+ },
+ [ALC294_FIXUP_ASUS_GU502_VERBS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* set 0x15 to HP-OUT ctrl */
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
+ /* unmute the 0x15 amp */
+ { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
+ /* set 0x1b to HP-OUT */
+ { 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_GU502_HP
+ },
+ [ALC294_FIXUP_ASUS_GU502_HP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc294_fixup_gu502_hp,
+ },
[ALC294_FIXUP_ASUS_COEF_1B] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -8035,6 +8126,36 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
},
+ [ALC285_FIXUP_HP_SPECTRE_X360] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x90170110 }, /* enable top speaker */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ },
+ [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_ideapad_s740_coef,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ },
+ [ALC623_FIXUP_LENOVO_THINKSTATION_P340] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_no_shutup,
+ .chained = true,
+ .chain_id = ALC283_FIXUP_HEADSET_MIC,
+ },
+ [ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x21, 0x03211030 }, /* Change the Headphone location to Left */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8071,6 +8192,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -8192,11 +8314,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -8215,7 +8341,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -8253,6 +8385,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
@@ -8309,12 +8442,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8332,11 +8472,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
- SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -8386,6 +8534,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8600,6 +8749,10 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+ {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{}
};
#define ALC225_STANDARD_PINS \
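Several of the new fixups above rely on chaining (.chained/.chain_id): applying one fixup also walks to the next entry in the chain, so ALC294_FIXUP_ASUS_GU502_PINS pulls in the GU502 verbs and the jack-toggle hook. A toy chain walker that mirrors the idea; the table and names are illustrative, not the driver's engine:

#include <stdbool.h>
#include <stdio.h>

struct toy_fixup {
	const char *name;
	bool chained;
	int chain_id;	/* index of the next fixup when chained */
};

static const struct toy_fixup table[] = {
	[0] = { "gu502-pins",  true,  1 },
	[1] = { "gu502-verbs", true,  2 },
	[2] = { "gu502-hp",    false, 0 },
};

static void apply_fixup(int id)
{
	for (;;) {
		printf("applying %s\n", table[id].name);
		if (!table[id].chained)
			break;
		id = table[id].chain_id;
	}
}

int main(void)
{
	apply_fixup(0);
	return 0;
}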
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 35903d1a1cbd..5b124c4ad572 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -331,6 +331,7 @@ struct ichdev {
unsigned int ali_slot; /* ALI DMA slot */
struct ac97_pcm *pcm;
int pcm_open_flag;
+ unsigned int prepared:1;
unsigned int suspended: 1;
};
@@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
int status, civ, i, step;
int ack = 0;
+ if (!ichdev->prepared || ichdev->suspended)
+ return;
+
spin_lock_irqsave(&chip->reg_lock, flags);
status = igetbyte(chip, port + ichdev->roff_sr);
civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
@@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
+ ichdev->prepared = 0;
}
err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
params_channels(hw_params),
@@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
+ ichdev->prepared = 0;
}
return 0;
}
@@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
}
snd_intel8x0_setup_periods(chip, ichdev);
+ ichdev->prepared = 1;
return 0;
}
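snd_intel8x0_update() now bails out for a stream that has not been through prepare() or is suspended, so a stray interrupt can no longer walk period descriptors that were never set up. A minimal sketch of the guard, with simplified types (not the driver's real structures):

#include <stdio.h>

struct toy_ichdev {
	unsigned int prepared:1;
	unsigned int suspended:1;
	unsigned int periods_elapsed;
};

static void toy_update(struct toy_ichdev *ich)
{
	/* ignore interrupts before prepare() and while suspended */
	if (!ich->prepared || ich->suspended)
		return;
	ich->periods_elapsed++;
}

int main(void)
{
	struct toy_ichdev ich = { 0, 0, 0 };

	toy_update(&ich);	/* dropped: not prepared yet */
	ich.prepared = 1;
	toy_update(&ich);	/* processed */
	printf("periods elapsed: %u\n", ich.periods_elapsed);
	return 0;
}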
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index f22bb2bdf527..8148b0d22e88 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -235,10 +235,6 @@ static int acp3x_dma_open(struct snd_soc_component *component,
return ret;
}
- if (!adata->play_stream && !adata->capture_stream &&
- !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
- rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-
i2s_data->acp3x_base = adata->acp3x_base;
runtime->private_data = i2s_data;
return ret;
@@ -365,12 +361,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
}
}
- /* Disable ACP irq, when the current stream is being closed and
- * another stream is also not active.
- */
- if (!adata->play_stream && !adata->capture_stream &&
- !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
- rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
return 0;
}
diff --git a/sound/soc/amd/raven/acp3x.h b/sound/soc/amd/raven/acp3x.h
index 03fe93913e12..c3f0c8b7545d 100644
--- a/sound/soc/amd/raven/acp3x.h
+++ b/sound/soc/amd/raven/acp3x.h
@@ -77,6 +77,7 @@
#define ACP_POWER_OFF_IN_PROGRESS 0x03
#define ACP3x_ITER_IRER_SAMP_LEN_MASK 0x38
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
struct acp3x_platform_info {
u16 play_i2s_instance;
diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
index d3536fd6a124..a013a607b3d4 100644
--- a/sound/soc/amd/raven/pci-acp3x.c
+++ b/sound/soc/amd/raven/pci-acp3x.c
@@ -76,6 +76,19 @@ static int acp3x_reset(void __iomem *acp3x_base)
return -ETIMEDOUT;
}
+static void acp3x_enable_interrupts(void __iomem *acp_base)
+{
+ rv_writel(0x01, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
+static void acp3x_disable_interrupts(void __iomem *acp_base)
+{
+ rv_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+ mmACP_EXTERNAL_INTR_STAT);
+ rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_CNTL);
+ rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
static int acp3x_init(struct acp3x_dev_data *adata)
{
void __iomem *acp3x_base = adata->acp3x_base;
@@ -93,6 +106,7 @@ static int acp3x_init(struct acp3x_dev_data *adata)
pr_err("ACP3x reset failed\n");
return ret;
}
+ acp3x_enable_interrupts(acp3x_base);
return 0;
}
@@ -100,6 +114,7 @@ static int acp3x_deinit(void __iomem *acp3x_base)
{
int ret;
+ acp3x_disable_interrupts(acp3x_base);
/* Reset */
ret = acp3x_reset(acp3x_base);
if (ret) {
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index 34aed80db0eb..37d4600b6f2c 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -307,7 +307,7 @@ static struct snd_soc_dai_driver ak5558_dai = {
};
static struct snd_soc_dai_driver ak5552_dai = {
- .name = "ak5558-aif",
+ .name = "ak5552-aif",
.capture = {
.stream_name = "Capture",
.channels_min = 1,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index f4067230ac42..88e79b9f52ed 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -261,6 +261,9 @@ static const struct regmap_config cs35l32_regmap = {
.readable_reg = cs35l32_readable_register,
.precious_reg = cs35l32_precious_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
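use_single_read/use_single_write tell regmap to split bulk transfers into one-register accesses, which matters for codecs whose I2C interface does not auto-increment the register address; register cache syncs would otherwise issue multi-byte reads and writes. A hedged sketch of the pattern for a hypothetical I2C codec; the field values are illustrative, the two new flags are the point:

#include <linux/regmap.h>

/* sketch only: "example" is not a real device */
static const struct regmap_config example_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
	.cache_type = REGCACHE_RBTREE,

	/* split every bulk access into single-register transfers */
	.use_single_read = true,
	.use_single_write = true,
};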
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 7ad7b733af9b..e8f3dcfd144d 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev,
"CS35L33 Device ID (%X). Expected ID %X\n",
devid, CS35L33_CHIP_ID);
+ ret = -EINVAL;
goto err_enable;
}
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 110ee2d06358..3d3c3c34dfe2 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -800,6 +800,9 @@ static struct regmap_config cs35l34_regmap = {
.readable_reg = cs35l34_readable_register,
.precious_reg = cs35l34_precious_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs35l34_handle_of_data(struct i2c_client *i2c_client,
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index bf982e145e94..77473c226f9e 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -399,6 +399,9 @@ static const struct regmap_config cs42l42_regmap = {
.reg_defaults = cs42l42_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index c44a5cdb796e..7cdffdf6b8cf 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1175,7 +1175,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
struct cs42l56_platform_data *pdata =
dev_get_platdata(&i2c_client->dev);
int ret, i;
- unsigned int devid = 0;
+ unsigned int devid;
unsigned int alpha_rev, metal_rev;
unsigned int reg;
@@ -1245,6 +1245,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
}
ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg);
+ if (ret) {
+ dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret);
+ return ret;
+ }
+
devid = reg & CS42L56_CHIP_ID_MASK;
if (devid != CS42L56_DEVID) {
dev_err(&i2c_client->dev,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index c3f974ec78e5..e92bacaab53f 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1268,6 +1268,9 @@ static const struct regmap_config cs42l73_regmap = {
.volatile_reg = cs42l73_volatile_register,
.readable_reg = cs42l73_readable_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 80bc7c10ed75..80cd3ea0c157 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -1735,6 +1735,14 @@ static DEVICE_ATTR(hpload_dc_r, 0444, cs43130_show_dc_r, NULL);
static DEVICE_ATTR(hpload_ac_l, 0444, cs43130_show_ac_l, NULL);
static DEVICE_ATTR(hpload_ac_r, 0444, cs43130_show_ac_r, NULL);
+static struct attribute *hpload_attrs[] = {
+ &dev_attr_hpload_dc_l.attr,
+ &dev_attr_hpload_dc_r.attr,
+ &dev_attr_hpload_ac_l.attr,
+ &dev_attr_hpload_ac_r.attr,
+};
+ATTRIBUTE_GROUPS(hpload);
+
static struct reg_sequence hp_en_cal_seq[] = {
{CS43130_INT_MASK_4, CS43130_INT_MASK_ALL},
{CS43130_HP_MEAS_LOAD_1, 0},
@@ -2302,25 +2310,15 @@ static int cs43130_probe(struct snd_soc_component *component)
cs43130->hpload_done = false;
if (cs43130->dc_meas) {
- ret = device_create_file(component->dev, &dev_attr_hpload_dc_l);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_dc_r);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_ac_l);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_ac_r);
- if (ret < 0)
+ ret = sysfs_create_groups(&component->dev->kobj, hpload_groups);
+ if (ret)
return ret;
cs43130->wq = create_singlethread_workqueue("cs43130_hp");
- if (!cs43130->wq)
+ if (!cs43130->wq) {
+ sysfs_remove_groups(&component->dev->kobj, hpload_groups);
return -ENOMEM;
+ }
INIT_WORK(&cs43130->work, cs43130_imp_meas);
}
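Replacing four device_create_file() calls with ATTRIBUTE_GROUPS()/sysfs_create_groups() gives the probe path one creation call and, more importantly, a single sysfs_remove_groups() to undo it when the workqueue allocation fails. A hedged sketch of the pattern with hypothetical attributes; note that attribute arrays are conventionally NULL-terminated:

#include <linux/device.h>
#include <linux/sysfs.h>

/* sketch only: dev_attr_foo/dev_attr_bar stand in for real DEVICE_ATTR() definitions */
static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	&dev_attr_bar.attr,
	NULL,			/* terminator expected by the sysfs core */
};
ATTRIBUTE_GROUPS(example);

/* in probe():
 *	ret = sysfs_create_groups(&dev->kobj, example_groups);
 * and on the error path or in remove():
 *	sysfs_remove_groups(&dev->kobj, example_groups);
 */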
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index 3d67cbf9eaaa..abe0cc0bc03a 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -912,6 +912,9 @@ static struct regmap_config cs53l30_regmap = {
.writeable_reg = cs53l30_writeable_register,
.readable_reg = cs53l30_readable_register,
.cache_type = REGCACHE_RBTREE,
+
+ .use_single_read = true,
+ .use_single_write = true,
};
static int cs53l30_i2c_probe(struct i2c_client *client,
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index bd3c523a8617..13009d08b09a 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -2181,10 +2181,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component)
ret);
goto err;
}
-
- da7219->dai_clks[i] = devm_clk_hw_get_clk(dev, dai_clk_hw, NULL);
- if (IS_ERR(da7219->dai_clks[i]))
- return PTR_ERR(da7219->dai_clks[i]);
+ da7219->dai_clks[i] = dai_clk_hw->clk;
/* For DT setup onecell data, otherwise create lookup */
if (np) {
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index b0ebfc8d180c..171ab7f519c0 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -3579,6 +3579,7 @@ static const struct of_device_id rx_macro_dt_match[] = {
{ .compatible = "qcom,sm8250-lpass-rx-macro" },
{ }
};
+MODULE_DEVICE_TABLE(of, rx_macro_dt_match);
static struct platform_driver rx_macro_driver = {
.driver = {
diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
index acd2fbc0ca7c..27a0d5defd27 100644
--- a/sound/soc/codecs/lpass-tx-macro.c
+++ b/sound/soc/codecs/lpass-tx-macro.c
@@ -1846,6 +1846,7 @@ static const struct of_device_id tx_macro_dt_match[] = {
{ .compatible = "qcom,sm8250-lpass-tx-macro" },
{ }
};
+MODULE_DEVICE_TABLE(of, tx_macro_dt_match);
static struct platform_driver tx_macro_driver = {
.driver = {
.name = "tx_macro",
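Both LPASS macro drivers only used their OF table for in-kernel matching; exporting it with MODULE_DEVICE_TABLE(of, ...) also generates the module aliases that let udev autoload the driver when a matching DT node probes. A hedged sketch of the idiom with a made-up compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

/* sketch only: "vendor,example-macro" is not a real binding */
static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example-macro" },
	{ }
};
MODULE_DEVICE_TABLE(of, example_dt_match);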
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 4be24e7f51c8..f8e49e45ce33 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -41,6 +41,7 @@ struct max98088_priv {
enum max98088_type devtype;
struct max98088_pdata *pdata;
struct clk *mclk;
+ unsigned char mclk_prescaler;
unsigned int sysclk;
struct max98088_cdata dai[2];
int eq_textcnt;
@@ -998,13 +999,16 @@ static int max98088_dai1_hw_params(struct snd_pcm_substream *substream,
/* Configure NI when operating as master */
if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT)
& M98088_DAI_MAS) {
+ unsigned long pclk;
+
if (max98088->sysclk == 0) {
dev_err(component->dev, "Invalid system clock frequency\n");
return -EINVAL;
}
ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
* (unsigned long long int)rate;
- do_div(ni, (unsigned long long int)max98088->sysclk);
+ pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+ ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
(ni >> 8) & 0x7F);
snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
@@ -1065,13 +1069,16 @@ static int max98088_dai2_hw_params(struct snd_pcm_substream *substream,
/* Configure NI when operating as master */
if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT)
& M98088_DAI_MAS) {
+ unsigned long pclk;
+
if (max98088->sysclk == 0) {
dev_err(component->dev, "Invalid system clock frequency\n");
return -EINVAL;
}
ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
* (unsigned long long int)rate;
- do_div(ni, (unsigned long long int)max98088->sysclk);
+ pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+ ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
(ni >> 8) & 0x7F);
snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
@@ -1113,8 +1120,10 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
*/
if ((freq >= 10000000) && (freq < 20000000)) {
snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10);
+ max98088->mclk_prescaler = 1;
} else if ((freq >= 20000000) && (freq < 30000000)) {
snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20);
+ max98088->mclk_prescaler = 2;
} else {
dev_err(component->dev, "Invalid master clock frequency\n");
return -EINVAL;
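In master mode the NI divider has to be derived from PCLK, i.e. MCLK divided by the prescaler the chip applies for 10-20 MHz (/1) and 20-30 MHz (/2) inputs; dividing by raw MCLK produced an NI value off by the prescaler factor. A small userspace check of the arithmetic for a 24 MHz MCLK and a 48 kHz stream (values illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	uint64_t rate = 48000, mclk = 24000000, prescaler = 2;
	uint64_t ni_base = 65536ULL * (rate < 50000 ? 96ULL : 48ULL) * rate;

	uint64_t ni_old = ni_base / mclk;			/* ignores the prescaler */
	uint64_t pclk = div_round_closest(mclk, prescaler);
	uint64_t ni_new = div_round_closest(ni_base, pclk);	/* matches the fix above */

	printf("old NI=%llu, new NI=%llu\n",
	       (unsigned long long)ni_old, (unsigned long long)ni_new);
	return 0;
}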
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 87f5709fe2cc..4a50b169fe03 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
return 0;
}
-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
NULL, 0),
- SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
- NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+ 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+ NULL, 0),
SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
RT5659_PWR_VREF3_BIT, 0, NULL, 0),
@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
/* Input Side */
- SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
- 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
static int rt5659_probe(struct snd_soc_component *component)
{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
rt5659->component = component;
+ switch (rt5659->pdata.jd_src) {
+ case RT5659_JD_HDA_HEADER:
+ break;
+
+ default:
+ snd_soc_dapm_new_controls(dapm,
+ rt5659_particular_dapm_widgets,
+ ARRAY_SIZE(rt5659_particular_dapm_widgets));
+ break;
+ }
+
return 0;
}
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index fed80c8f994f..e78ba3b064c4 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
- regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c
index cc36739f7fcf..24a084e0b48a 100644
--- a/sound/soc/codecs/rt711-sdca.c
+++ b/sound/soc/codecs/rt711-sdca.c
@@ -683,13 +683,13 @@ static int rt711_sdca_set_fu1e_capture_ctl(struct rt711_sdca_priv *rt711)
ch_r = (rt711->fu1e_dapm_mute || rt711->fu1e_mixer_r_mute) ? 0x01 : 0x00;
err = regmap_write(rt711->regmap,
- SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+ SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
RT711_SDCA_CTL_FU_MUTE, CH_L), ch_l);
if (err < 0)
return err;
err = regmap_write(rt711->regmap,
- SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+ SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
RT711_SDCA_CTL_FU_MUTE, CH_R), ch_r);
if (err < 0)
return err;
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index ffdf7e559515..82a24e330065 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -408,6 +408,7 @@ static const struct of_device_id sti_sas_dev_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, sti_sas_dev_match);
static int sti_sas_driver_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
index 81866aeb3fbf..55b2a1f52ca3 100644
--- a/sound/soc/codecs/tas2562.h
+++ b/sound/soc/codecs/tas2562.h
@@ -57,13 +57,13 @@
#define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
#define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
#define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ (0x0 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ (0x1 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ (0x2 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ (0x3 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ (0x4 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ (0x5 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
#define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
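TAS2562_TDM_CFG0_SAMPRATE_MASK is GENMASK(3, 1), so the rate selectors have to be pre-shifted into bits 3:1; the unshifted constants previously landed in bits 2:0 and overlapped the neighbouring field. A quick userspace check of the field arithmetic (the GENMASK32 macro below is a 32-bit stand-in for the kernel one):

#include <stdio.h>

#define GENMASK32(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned int mask = GENMASK32(3, 1);	/* 0x0e: bits 3..1 */
	unsigned int sel = 0x4;			/* 44.1/48 kHz selector */

	printf("mask=0x%02x unshifted=0x%02x shifted=0x%02x\n",
	       mask, sel & mask, (sel << 1) & mask);
	return 0;
}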
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 0917d65d6921..556c284f49dd 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -119,6 +119,7 @@ config SND_SOC_FSL_RPMSG
tristate "NXP Audio Base On RPMSG support"
depends on COMMON_CLK
depends on RPMSG
+ depends on SND_IMX_SOC || SND_IMX_SOC = n
select SND_SOC_IMX_RPMSG if SND_IMX_SOC != n
help
Say Y if you want to add rpmsg audio support for the Freescale CPUs.
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index c62bfd1c3ac7..4f55b316cf0f 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -744,6 +744,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
/* Initialize sound card */
priv->pdev = pdev;
priv->card.dev = &pdev->dev;
+ priv->card.owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(&priv->card, "model");
if (ret) {
snprintf(priv->name, sizeof(priv->name), "%s-audio",
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 2c8a2fcb7922..5e71382467e8 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -209,7 +209,7 @@ static void graph_parse_mclk_fs(struct device_node *top,
static int graph_parse_node(struct asoc_simple_priv *priv,
struct device_node *ep,
struct link_info *li,
- int is_cpu)
+ int *cpu)
{
struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
@@ -217,9 +217,9 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct snd_soc_dai_link_component *dlc;
struct asoc_simple_dai *dai;
- int ret, single = 0;
+ int ret;
- if (is_cpu) {
+ if (cpu) {
dlc = asoc_link_to_cpu(dai_link, 0);
dai = simple_props_to_dai_cpu(dai_props, 0);
} else {
@@ -229,7 +229,7 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
graph_parse_mclk_fs(top, ep, dai_props);
- ret = asoc_simple_parse_dai(ep, dlc, &single);
+ ret = asoc_simple_parse_dai(ep, dlc, cpu);
if (ret < 0)
return ret;
@@ -241,9 +241,6 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
if (ret < 0)
return ret;
- if (is_cpu)
- asoc_simple_canonicalize_cpu(dlc, single);
-
return 0;
}
@@ -276,33 +273,29 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
struct link_info *li)
{
struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_card *card = simple_priv_to_card(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct device_node *top = dev->of_node;
struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
- struct device_node *port;
- struct device_node *ports;
- struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
- struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
char dai_name[64];
int ret;
- port = of_get_parent(ep);
- ports = of_get_parent(port);
-
dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
if (li->cpu) {
+ struct snd_soc_card *card = simple_priv_to_card(priv);
+ struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+ int is_single_links = 0;
+
/* Codec is dummy */
/* FE settings */
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
- ret = graph_parse_node(priv, cpu_ep, li, 1);
+ ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
if (ret)
- goto out_put_node;
+ return ret;
snprintf(dai_name, sizeof(dai_name),
"fe.%pOFP.%s", cpus->of_node, cpus->dai_name);
@@ -318,8 +311,13 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
*/
if (card->component_chaining && !soc_component_is_pcm(cpus))
dai_link->no_pcm = 1;
+
+ asoc_simple_canonicalize_cpu(cpus, is_single_links);
} else {
- struct snd_soc_codec_conf *cconf;
+ struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
+ struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
+ struct device_node *port;
+ struct device_node *ports;
/* CPU is dummy */
@@ -327,22 +325,25 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = asoc_simple_be_hw_params_fixup;
- cconf = simple_props_to_codec_conf(dai_props, 0);
-
- ret = graph_parse_node(priv, codec_ep, li, 0);
+ ret = graph_parse_node(priv, codec_ep, li, NULL);
if (ret < 0)
- goto out_put_node;
+ return ret;
snprintf(dai_name, sizeof(dai_name),
"be.%pOFP.%s", codecs->of_node, codecs->dai_name);
/* check "prefix" from top node */
+ port = of_get_parent(ep);
+ ports = of_get_parent(port);
snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
"prefix");
if (of_node_name_eq(ports, "ports"))
snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node,
"prefix");
+
+ of_node_put(ports);
+ of_node_put(port);
}
graph_parse_convert(dev, ep, &dai_props->adata);
@@ -351,11 +352,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
-out_put_node:
li->link++;
- of_node_put(ports);
- of_node_put(port);
return ret;
}
@@ -369,20 +367,23 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
char dai_name[64];
- int ret;
+ int ret, is_single_links = 0;
dev_dbg(dev, "link_of (%pOF)\n", cpu_ep);
- ret = graph_parse_node(priv, cpu_ep, li, 1);
+ ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
if (ret < 0)
return ret;
- ret = graph_parse_node(priv, codec_ep, li, 0);
+ ret = graph_parse_node(priv, codec_ep, li, NULL);
if (ret < 0)
return ret;
snprintf(dai_name, sizeof(dai_name),
"%s-%s", cpus->dai_name, codecs->dai_name);
+
+ asoc_simple_canonicalize_cpu(cpus, is_single_links);
+
ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
if (ret < 0)
return ret;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index a1373be4558f..0015f534d42d 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -93,12 +93,11 @@ static void simple_parse_convert(struct device *dev,
}
static void simple_parse_mclk_fs(struct device_node *top,
- struct device_node *cpu,
- struct device_node *codec,
+ struct device_node *np,
struct simple_dai_props *props,
char *prefix)
{
- struct device_node *node = of_get_parent(cpu);
+ struct device_node *node = of_get_parent(np);
char prop[128];
snprintf(prop, sizeof(prop), "%smclk-fs", PREFIX);
@@ -106,12 +105,71 @@ static void simple_parse_mclk_fs(struct device_node *top,
snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
of_property_read_u32(node, prop, &props->mclk_fs);
- of_property_read_u32(cpu, prop, &props->mclk_fs);
- of_property_read_u32(codec, prop, &props->mclk_fs);
+ of_property_read_u32(np, prop, &props->mclk_fs);
of_node_put(node);
}
+static int simple_parse_node(struct asoc_simple_priv *priv,
+ struct device_node *np,
+ struct link_info *li,
+ char *prefix,
+ int *cpu)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *top = dev->of_node;
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
+ struct snd_soc_dai_link_component *dlc;
+ struct asoc_simple_dai *dai;
+ int ret;
+
+ if (cpu) {
+ dlc = asoc_link_to_cpu(dai_link, 0);
+ dai = simple_props_to_dai_cpu(dai_props, 0);
+ } else {
+ dlc = asoc_link_to_codec(dai_link, 0);
+ dai = simple_props_to_dai_codec(dai_props, 0);
+ }
+
+ simple_parse_mclk_fs(top, np, dai_props, prefix);
+
+ ret = asoc_simple_parse_dai(np, dlc, cpu);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_parse_clk(dev, np, dai, dlc);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_parse_tdm(np, dai);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int simple_link_init(struct asoc_simple_priv *priv,
+ struct device_node *node,
+ struct device_node *codec,
+ struct link_info *li,
+ char *prefix, char *name)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+ int ret;
+
+ ret = asoc_simple_parse_daifmt(dev, node, codec,
+ prefix, &dai_link->dai_fmt);
+ if (ret < 0)
+ return 0;
+
+ dai_link->init = asoc_simple_dai_init;
+ dai_link->ops = &simple_ops;
+
+ return asoc_simple_set_dailink_name(dev, dai_link, name);
+}
+
static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
struct device_node *np,
struct device_node *codec,
@@ -121,24 +179,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct asoc_simple_dai *dai;
- struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
- struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
- struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
struct device_node *top = dev->of_node;
struct device_node *node = of_get_parent(np);
char *prefix = "";
+ char dai_name[64];
int ret;
dev_dbg(dev, "link_of DPCM (%pOF)\n", np);
- li->link++;
-
/* For single DAI link & old style of DT node */
if (is_top)
prefix = PREFIX;
if (li->cpu) {
+ struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+ struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
int is_single_links = 0;
/* Codec is dummy */
@@ -147,25 +202,16 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
- dai = simple_props_to_dai_cpu(dai_props, 0);
-
- ret = asoc_simple_parse_dai(np, cpus, &is_single_links);
- if (ret)
- goto out_put_node;
-
- ret = asoc_simple_parse_clk(dev, np, dai, cpus);
+ ret = simple_parse_node(priv, np, li, prefix, &is_single_links);
if (ret < 0)
goto out_put_node;
- ret = asoc_simple_set_dailink_name(dev, dai_link,
- "fe.%s",
- cpus->dai_name);
- if (ret < 0)
- goto out_put_node;
+ snprintf(dai_name, sizeof(dai_name), "fe.%s", cpus->dai_name);
asoc_simple_canonicalize_cpu(cpus, is_single_links);
asoc_simple_canonicalize_platform(platforms, cpus);
} else {
+ struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
struct snd_soc_codec_conf *cconf;
/* CPU is dummy */
@@ -174,22 +220,13 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = asoc_simple_be_hw_params_fixup;
- dai = simple_props_to_dai_codec(dai_props, 0);
cconf = simple_props_to_codec_conf(dai_props, 0);
- ret = asoc_simple_parse_dai(np, codecs, NULL);
+ ret = simple_parse_node(priv, np, li, prefix, NULL);
if (ret < 0)
goto out_put_node;
- ret = asoc_simple_parse_clk(dev, np, dai, codecs);
- if (ret < 0)
- goto out_put_node;
-
- ret = asoc_simple_set_dailink_name(dev, dai_link,
- "be.%s",
- codecs->dai_name);
- if (ret < 0)
- goto out_put_node;
+ snprintf(dai_name, sizeof(dai_name), "be.%s", codecs->dai_name);
/* check "prefix" from top node */
snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
@@ -201,23 +238,14 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
}
simple_parse_convert(dev, np, &dai_props->adata);
- simple_parse_mclk_fs(top, np, codec, dai_props, prefix);
-
- ret = asoc_simple_parse_tdm(np, dai);
- if (ret)
- goto out_put_node;
-
- ret = asoc_simple_parse_daifmt(dev, node, codec,
- prefix, &dai_link->dai_fmt);
- if (ret < 0)
- goto out_put_node;
snd_soc_dai_link_set_capabilities(dai_link);
- dai_link->ops = &simple_ops;
- dai_link->init = asoc_simple_dai_init;
+ ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
out_put_node:
+ li->link++;
+
of_node_put(node);
return ret;
}
@@ -230,23 +258,19 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
{
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct asoc_simple_dai *cpu_dai = simple_props_to_dai_cpu(dai_props, 0);
- struct asoc_simple_dai *codec_dai = simple_props_to_dai_codec(dai_props, 0);
struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
- struct device_node *top = dev->of_node;
struct device_node *cpu = NULL;
struct device_node *node = NULL;
struct device_node *plat = NULL;
+ char dai_name[64];
char prop[128];
char *prefix = "";
int ret, single_cpu = 0;
cpu = np;
node = of_get_parent(np);
- li->link++;
dev_dbg(dev, "link_of (%pOF)\n", node);
@@ -257,18 +281,11 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
snprintf(prop, sizeof(prop), "%splat", prefix);
plat = of_get_child_by_name(node, prop);
- ret = asoc_simple_parse_daifmt(dev, node, codec,
- prefix, &dai_link->dai_fmt);
- if (ret < 0)
- goto dai_link_of_err;
-
- simple_parse_mclk_fs(top, cpu, codec, dai_props, prefix);
-
- ret = asoc_simple_parse_dai(cpu, cpus, &single_cpu);
+ ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_parse_dai(codec, codecs, NULL);
+ ret = simple_parse_node(priv, codec, li, prefix, NULL);
if (ret < 0)
goto dai_link_of_err;
@@ -276,39 +293,20 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_parse_tdm(cpu, cpu_dai);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_parse_tdm(codec, codec_dai);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_parse_clk(dev, cpu, cpu_dai, cpus);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_parse_clk(dev, codec, codec_dai, codecs);
- if (ret < 0)
- goto dai_link_of_err;
-
- ret = asoc_simple_set_dailink_name(dev, dai_link,
- "%s-%s",
- cpus->dai_name,
- codecs->dai_name);
- if (ret < 0)
- goto dai_link_of_err;
-
- dai_link->ops = &simple_ops;
- dai_link->init = asoc_simple_dai_init;
+ snprintf(dai_name, sizeof(dai_name),
+ "%s-%s", cpus->dai_name, codecs->dai_name);
asoc_simple_canonicalize_cpu(cpus, single_cpu);
asoc_simple_canonicalize_platform(platforms, cpus);
+ ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
+
dai_link_of_err:
of_node_put(plat);
of_node_put(node);
+ li->link++;
+
return ret;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index df2f5d55e8ff..22dbd9d93c1e 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -574,6 +574,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Glavey TM800A550L */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -652,6 +663,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_MCLK_EN),
},
+ { /* Lenovo Miix 3-830 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 3-830"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Linx Linx7 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index b941adcbb8f9..939e7e28486a 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -61,22 +61,6 @@ static void dump_registers(struct ssp_device *ssp)
pxa_ssp_read_reg(ssp, SSACD));
}
-static void pxa_ssp_enable(struct ssp_device *ssp)
-{
- uint32_t sscr0;
-
- sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE;
- __raw_writel(sscr0, ssp->mmio_base + SSCR0);
-}
-
-static void pxa_ssp_disable(struct ssp_device *ssp)
-{
- uint32_t sscr0;
-
- sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE;
- __raw_writel(sscr0, ssp->mmio_base + SSCR0);
-}
-
static void pxa_ssp_set_dma_params(struct ssp_device *ssp, int width4,
int out, struct snd_dmaengine_dai_dma_data *dma)
{
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index c62d2612e8f5..a6e95db6b3fb 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+ unsigned int id = dai->driver->id;
clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
+ /*
+	 * Ensure LRCLK is disabled even during device node validation.
+	 * This has no effect if it was already disabled in the
+	 * lpass_cpu_daiops_trigger() suspend path.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
+ else
+ regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
+
+ /*
+	 * BCLK may not be enabled if lpass_cpu_daiops_prepare was never called
+	 * before lpass_cpu_daiops_shutdown. This clk_disable is paired with the
+	 * clk_enable in lpass_cpu_daiops_prepare.
+ */
+ if (drvdata->mi2s_was_prepared[dai->driver->id]) {
+ drvdata->mi2s_was_prepared[dai->driver->id] = false;
+ clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+ }
+
clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
}
@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /*
+ * Ensure lpass BCLK/LRCLK is enabled during
+ * device resume as lpass_cpu_daiops_prepare() is not called
+ * after the device resumes. We don't check mi2s_was_prepared before
+	 * enabling/disabling BCLK in trigger events because:
+	 * 1. These trigger events are paired, so the BCLK
+	 * enable_count is balanced.
+	 * 2. The BCLK can be shared (e.g. headset and headset mic),
+ * we need to increase the enable_count so that we don't
+ * turn off the shared BCLK while other devices are using
+ * it.
+ */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = regmap_fields_write(i2sctl->spken, id,
LPAIF_I2SCTL_SPKEN_ENABLE);
@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ /*
+ * Ensure the lpass BCLK/LRCLK is disabled during
+ * device suspend.
+ */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = regmap_fields_write(i2sctl->spken, id,
LPAIF_I2SCTL_SPKEN_DISABLE);
@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+ unsigned int id = dai->driver->id;
+ int ret;
+
+ /*
+ * Ensure the lpass BCLK/LRCLK enable bit is set before playback/capture
+ * data flow starts. This allows the other codec to have some delay before
+ * the data flow
+ * (e.g. to drop the start-up pop noise before capture starts).
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
+ else
+ ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
+
+ if (ret) {
+ dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check mi2s_was_prepared before enabling BCLK, as lpass_cpu_daiops_prepare()
+ * can be called multiple times. This clk_enable() is paired with the
+ * clk_disable() in lpass_cpu_daiops_shutdown().
+ */
+ if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
+ ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+ if (ret) {
+ dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+ return ret;
+ }
+ drvdata->mi2s_was_prepared[dai->driver->id] = true;
+ }
+ return 0;
+}
+
const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
.set_sysclk = lpass_cpu_daiops_set_sysclk,
.startup = lpass_cpu_daiops_startup,
.shutdown = lpass_cpu_daiops_shutdown,
.hw_params = lpass_cpu_daiops_hw_params,
.trigger = lpass_cpu_daiops_trigger,
+ .prepare = lpass_cpu_daiops_prepare,
};
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
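The mi2s_was_prepared flag used above implements an "enable once, disable once"
guard: .prepare() can run more than once per stream, while clk_enable() and
clk_disable() must stay balanced. A minimal sketch of that pattern, assuming
only the flag and the bit clock from this driver (not literal driver code):

	/* enable the bit clock exactly once per prepared stream */
	if (!drvdata->mi2s_was_prepared[id]) {
		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
		if (ret)
			return ret;
		drvdata->mi2s_was_prepared[id] = true;
	}

	/* ...and disable it exactly once on shutdown */
	if (drvdata->mi2s_was_prepared[id]) {
		drvdata->mi2s_was_prepared[id] = false;
		clk_disable(drvdata->mi2s_bit_clk[id]);
	}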
@@ -835,18 +914,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
if (dai_id == LPASS_DP_RX)
continue;
- drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
+ drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
variant->dai_osr_clk_names[i]);
- if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
- dev_warn(dev,
- "%s() error getting optional %s: %ld\n",
- __func__,
- variant->dai_osr_clk_names[i],
- PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
-
- drvdata->mi2s_osr_clk[dai_id] = NULL;
- }
-
drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
variant->dai_bit_clk_names[i]);
if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 83b2e08ade06..7f72214404ba 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -67,6 +67,10 @@ struct lpass_data {
/* MI2S SD lines to use for playback/capture */
unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
+ /* Whether the prepare() dai_op has been called for each MI2S port */
+ bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
+
int hdmi_port_enable;
/* low-power audio interface (LPAIF) registers */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c0904acb935..a76974ccfce1 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2225,6 +2225,8 @@ static char *fmt_single_name(struct device *dev, int *id)
return NULL;
name = devm_kstrdup(dev, devname, GFP_KERNEL);
+ if (!name)
+ return NULL;
/* are we a "%s.%d" name (platform and SPI components) */
found = strstr(name, dev->driver->name);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 73076d425efb..4893a56208e0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1901,7 +1901,7 @@ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
* @src: older version of pcm as a source
* @pcm: latest version of pcm created from the source
*
- * Support from vesion 4. User should free the returned pcm manually.
+ * Support from version 4. User should free the returned pcm manually.
*/
static int pcm_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_pcm *src,
@@ -2089,7 +2089,7 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
* @src: old version of phyical link config as a source
* @link: latest version of physical link config created from the source
*
- * Support from vesion 4. User need free the returned link config manually.
+ * Support from version 4. User need free the returned link config manually.
*/
static int link_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_link_config *src,
@@ -2400,7 +2400,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
* @src: old version of manifest as a source
* @manifest: latest version of manifest created from the source
*
- * Support from vesion 4. Users need free the returned manifest manually.
+ * Support from version 4. Users need free the returned manifest manually.
*/
static int manifest_new_ver(struct soc_tplg *tplg,
struct snd_soc_tplg_manifest *src,
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 8d7bab433fb3..c1f9f0f58464 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -421,11 +421,16 @@ static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
struct sof_ipc_dai_config *config;
struct snd_sof_dai *sof_dai;
struct sof_ipc_reply reply;
int ret;
+ /* DAI_CONFIG IPC during hw_params is not supported in older firmware */
+ if (v->abi_version < SOF_ABI_VER(3, 18, 0))
+ return 0;
+
list_for_each_entry(sof_dai, &sdev->dai_list, list) {
if (!sof_dai->cpu_dai_name || !sof_dai->dai_config)
continue;
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index fd265803f7bc..c83fb6255961 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -256,6 +256,7 @@ suspend:
/* reset FW state */
sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+ sdev->enabled_cores_mask = 0;
return ret;
}
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index c1561237ee24..3aa1cf262402 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -484,10 +484,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
dev_err(dev, "mclk register returned %d\n", ret);
return ret;
}
-
- sai->sai_mclk = devm_clk_hw_get_clk(dev, hw, NULL);
- if (IS_ERR(sai->sai_mclk))
- return PTR_ERR(sai->sai_mclk);
+ sai->sai_mclk = hw->clk;
/* register mclk provider */
return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
diff --git a/sound/usb/format.c b/sound/usb/format.c
index e6ff317a6785..2287f8c65315 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -436,7 +436,7 @@ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface,
if (snd_BUG_ON(altsetting >= 64 - 8))
return false;
- err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
UAC2_AS_VAL_ALT_SETTINGS << 8,
iface, &raw_data, sizeof(raw_data));
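The usb_sndctrlpipe() -> usb_rcvctrlpipe() change here (and the matching one in
mixer_scarlett_gen2.c below) follows the general USB convention that the pipe
direction must agree with the USB_DIR_* bit of the request. A hedged sketch of
the rule, using the generic usb_control_msg() helper rather than code from this
patch:

	/* device-to-host request (USB_DIR_IN) uses the receive control pipe */
	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			      value, index, buf, len, 1000);

	/* host-to-device request (USB_DIR_OUT) uses the send control pipe */
	err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
			      value, index, buf, len, 1000);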
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index a030dd65eb28..9602929b7de9 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
if (!line6->buffer_message)
return -ENOMEM;
+
+ ret = line6_init_midi(line6);
+ if (ret < 0)
+ return ret;
} else {
ret = line6_hwdep_init(line6);
if (ret < 0)
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index cd44cb5f1310..16e644330c4d 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
if (err < 0)
return err;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(line6);
- if (err < 0)
- return err;
-
/* initialize PCM subsystem: */
err = line6_init_pcm(line6, &pod_pcm_properties);
if (err < 0)
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index ed158f04de80..c2245aa93b08 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
const struct usb_device_id *id)
{
struct usb_line6_variax *variax = line6_to_variax(line6);
- int err;
line6->process_message = line6_variax_process_message;
line6->disconnect = line6_variax_disconnect;
@@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
if (variax->buffer_activate == NULL)
return -ENOMEM;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(&variax->line6);
- if (err < 0)
- return err;
-
/* initiate startup procedure: */
schedule_delayed_work(&line6->startup_work,
msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index a10ac75969a8..2c01649c70f6 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1750,7 +1750,7 @@ static struct usb_midi_in_jack_descriptor *find_usb_in_jack_descriptor(
struct usb_midi_in_jack_descriptor *injd =
(struct usb_midi_in_jack_descriptor *)extra;
- if (injd->bLength > 4 &&
+ if (injd->bLength >= sizeof(*injd) &&
injd->bDescriptorType == USB_DT_CS_INTERFACE &&
injd->bDescriptorSubtype == UAC_MIDI_IN_JACK &&
injd->bJackID == jack_id)
@@ -1773,7 +1773,7 @@ static struct usb_midi_out_jack_descriptor *find_usb_out_jack_descriptor(
struct usb_midi_out_jack_descriptor *outjd =
(struct usb_midi_out_jack_descriptor *)extra;
- if (outjd->bLength > 4 &&
+ if (outjd->bLength >= sizeof(*outjd) &&
outjd->bDescriptorType == USB_DT_CS_INTERFACE &&
outjd->bDescriptorSubtype == UAC_MIDI_OUT_JACK &&
outjd->bJackID == jack_id)
@@ -1820,7 +1820,8 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
outjd = find_usb_out_jack_descriptor(hostif, jack_id);
if (outjd) {
sz = USB_DT_MIDI_OUT_SIZE(outjd->bNrInputPins);
- iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
+ if (outjd->bLength >= sz)
+ iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
}
} else {
/* and out jacks connect to ins */
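The bLength checks added in this file guard against short, device-supplied
descriptors: the optional iJack string index sits at the very end of a jack
descriptor, so it may only be read once bLength is known to cover it. A sketch
of the idea (descriptor size assumed from the USB MIDI 1.0 layout, where
USB_DT_MIDI_OUT_SIZE(p) is 7 + 2 * p; not literal driver code):

	sz = USB_DT_MIDI_OUT_SIZE(outjd->bNrInputPins);
	if (outjd->bLength >= sz)
		iJack = ((const uint8_t *)outjd)[sz - 1];	/* last byte: iJack */
	else
		iJack = 0;					/* descriptor too short */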
@@ -1956,8 +1957,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
ms_ep = find_usb_ms_endpoint_descriptor(hostep);
if (!ms_ep)
continue;
+ if (ms_ep->bLength <= sizeof(*ms_ep))
+ continue;
if (ms_ep->bNumEmbMIDIJack > 0x10)
continue;
+ if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
+ continue;
if (usb_endpoint_dir_out(ep)) {
if (endpoints[epidx].out_ep) {
if (++epidx >= MIDI_MAX_ENDPOINTS) {
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index fda66b2dbb01..37ad77524c0b 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -3060,7 +3060,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */
case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */
case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */
- err = snd_scarlett_gen2_controls_create(mixer);
+ err = snd_scarlett_gen2_init(mixer);
break;
case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 560c2ade829d..4caf379d5b99 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -635,7 +635,7 @@ static int scarlett2_usb(
/* send a second message to get the response */
err = snd_usb_ctl_msg(mixer->chip->dev,
- usb_sndctrlpipe(mixer->chip->dev, 0),
+ usb_rcvctrlpipe(mixer->chip->dev, 0),
SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
0,
@@ -1997,38 +1997,11 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
return usb_submit_urb(mixer->urb, GFP_KERNEL);
}
-/* Entry point */
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
+ const struct scarlett2_device_info *info)
{
- const struct scarlett2_device_info *info;
int err;
- /* only use UAC_VERSION_2 */
- if (!mixer->protocol)
- return 0;
-
- switch (mixer->chip->usb_id) {
- case USB_ID(0x1235, 0x8203):
- info = &s6i6_gen2_info;
- break;
- case USB_ID(0x1235, 0x8204):
- info = &s18i8_gen2_info;
- break;
- case USB_ID(0x1235, 0x8201):
- info = &s18i20_gen2_info;
- break;
- default: /* device not (yet) supported */
- return -EINVAL;
- }
-
- if (!(mixer->chip->setup & SCARLETT2_ENABLE)) {
- usb_audio_err(mixer->chip,
- "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
- "use options snd_usb_audio device_setup=1 "
- "to enable and report any issues to g@b4.vu");
- return 0;
- }
-
/* Initialise private data, routing, sequence number */
err = scarlett2_init_private(mixer, info);
if (err < 0)
@@ -2073,3 +2046,51 @@ int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ const struct scarlett2_device_info *info;
+ int err;
+
+ /* only use UAC_VERSION_2 */
+ if (!mixer->protocol)
+ return 0;
+
+ switch (chip->usb_id) {
+ case USB_ID(0x1235, 0x8203):
+ info = &s6i6_gen2_info;
+ break;
+ case USB_ID(0x1235, 0x8204):
+ info = &s18i8_gen2_info;
+ break;
+ case USB_ID(0x1235, 0x8201):
+ info = &s18i20_gen2_info;
+ break;
+ default: /* device not (yet) supported */
+ return -EINVAL;
+ }
+
+ if (!(chip->setup & SCARLETT2_ENABLE)) {
+ usb_audio_info(chip,
+ "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
+ "use options snd_usb_audio vid=0x%04x pid=0x%04x "
+ "device_setup=1 to enable and report any issues "
+ "to g@b4.vu",
+ USB_ID_VENDOR(chip->usb_id),
+ USB_ID_PRODUCT(chip->usb_id));
+ return 0;
+ }
+
+ usb_audio_info(chip,
+ "Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x",
+ USB_ID_PRODUCT(chip->usb_id));
+
+ err = snd_scarlett_gen2_controls_create(mixer, info);
+ if (err < 0)
+ usb_audio_err(mixer->chip,
+ "Error initialising Scarlett Mixer Driver: %d",
+ err);
+
+ return err;
+}
diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h
index 52e1dad77afd..668c6b0cb50a 100644
--- a/sound/usb/mixer_scarlett_gen2.h
+++ b/sound/usb/mixer_scarlett_gen2.h
@@ -2,6 +2,6 @@
#ifndef __USB_MIXER_SCARLETT_GEN2_H
#define __USB_MIXER_SCARLETT_GEN2_H
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer);
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
#endif /* __USB_MIXER_SCARLETT_GEN2_H */
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d0f4ecd616cf
--- /dev/null
+++ b/tools/arch/mips/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+ PERF_REG_MIPS_PC,
+ PERF_REG_MIPS_R1,
+ PERF_REG_MIPS_R2,
+ PERF_REG_MIPS_R3,
+ PERF_REG_MIPS_R4,
+ PERF_REG_MIPS_R5,
+ PERF_REG_MIPS_R6,
+ PERF_REG_MIPS_R7,
+ PERF_REG_MIPS_R8,
+ PERF_REG_MIPS_R9,
+ PERF_REG_MIPS_R10,
+ PERF_REG_MIPS_R11,
+ PERF_REG_MIPS_R12,
+ PERF_REG_MIPS_R13,
+ PERF_REG_MIPS_R14,
+ PERF_REG_MIPS_R15,
+ PERF_REG_MIPS_R16,
+ PERF_REG_MIPS_R17,
+ PERF_REG_MIPS_R18,
+ PERF_REG_MIPS_R19,
+ PERF_REG_MIPS_R20,
+ PERF_REG_MIPS_R21,
+ PERF_REG_MIPS_R22,
+ PERF_REG_MIPS_R23,
+ PERF_REG_MIPS_R24,
+ PERF_REG_MIPS_R25,
+ PERF_REG_MIPS_R26,
+ PERF_REG_MIPS_R27,
+ PERF_REG_MIPS_R28,
+ PERF_REG_MIPS_R29,
+ PERF_REG_MIPS_R30,
+ PERF_REG_MIPS_R31,
+ PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 742d89a00721..211ba3375ee9 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -537,9 +537,9 @@
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG 0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 5a3022c8af82..0662f644aad9 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -437,6 +437,8 @@ struct kvm_vmx_nested_state_hdr {
__u16 flags;
} smm;
+ __u16 pad;
+
__u32 flags;
__u64 preemption_timer_deadline;
};
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
index 078cbd2ba651..de7f30f99af3 100644
--- a/tools/bootconfig/include/linux/bootconfig.h
+++ b/tools/bootconfig/include/linux/bootconfig.h
@@ -4,4 +4,8 @@
#include "../../../../include/linux/bootconfig.h"
+#ifndef fallthrough
+# define fallthrough
+#endif
+
#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 7362bef1a368..6cd6080cac04 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
}
/* TODO: Ensure the @path is initramfs/initrd image */
if (fstat(fd, &stat) < 0) {
+ ret = -errno;
pr_err("Failed to get the size of %s\n", path);
goto out;
}
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 790944c35602..baee8591ac76 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -30,7 +30,8 @@ CGROUP COMMANDS
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
-| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** }
+| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
+| **sock_release** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@@ -106,6 +107,7 @@ DESCRIPTION
**getpeername6** call to getpeername(2) for an inet6 socket (since 5.8);
**getsockname4** call to getsockname(2) for an inet4 socket (since 5.8);
**getsockname6** call to getsockname(2) for an inet6 socket (since 5.8).
+ **sock_release** closing a userspace inet socket (since 5.9).
**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
Detach *PROG* from the cgroup *CGROUP* and attach type
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 358c7309d419..fe1b38e7e887 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -44,7 +44,7 @@ PROG COMMANDS
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** |
| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-| **cgroup/getsockopt** | **cgroup/setsockopt** |
+| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
| }
| *ATTACH_TYPE* := {
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index d67518bcbd44..cc33c5824a2f 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -478,7 +478,7 @@ _bpftool()
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
- cgroup/setsockopt struct_ops \
+ cgroup/setsockopt cgroup/sock_release struct_ops \
fentry fexit freplace sk_lookup" -- \
"$cur" ) )
return 0
@@ -1021,7 +1021,7 @@ _bpftool()
device bind4 bind6 post_bind4 post_bind6 connect4 connect6 \
getpeername4 getpeername6 getsockname4 getsockname6 \
sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \
- setsockopt'
+ setsockopt sock_release'
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag name'
case $prev in
@@ -1032,7 +1032,7 @@ _bpftool()
ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
post_bind4|post_bind6|connect4|connect6|getpeername4|\
getpeername6|getsockname4|getsockname6|sendmsg4|sendmsg6|\
- recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt)
+ recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt|sock_release)
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
"$cur" ) )
return 0
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index d901cc1b904a..6e53b1d393f4 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -28,7 +28,8 @@
" connect6 | getpeername4 | getpeername6 |\n" \
" getsockname4 | getsockname6 | sendmsg4 |\n" \
" sendmsg6 | recvmsg4 | recvmsg6 |\n" \
- " sysctl | getsockopt | setsockopt }"
+ " sysctl | getsockopt | setsockopt |\n" \
+ " sock_release }"
static unsigned int query_flags;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 3f067d2d7584..da4846c9856a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2138,7 +2138,7 @@ static int do_help(int argc, char **argv)
" cgroup/getpeername4 | cgroup/getpeername6 |\n"
" cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
" cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
- " cgroup/getsockopt | cgroup/setsockopt |\n"
+ " cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
" struct_ops | fentry | fexit | freplace | sk_lookup }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index cd72016c3cfa..715092fc6a23 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -51,39 +51,39 @@ subdir-obj-y :=
build-file := $(dir)/Build
-include $(build-file)
-quiet_cmd_flex = FLEX $@
-quiet_cmd_bison = BISON $@
+quiet_cmd_flex = FLEX $@
+quiet_cmd_bison = BISON $@
# Create directory unless it exists
-quiet_cmd_mkdir = MKDIR $(dir $@)
+quiet_cmd_mkdir = MKDIR $(dir $@)
cmd_mkdir = mkdir -p $(dir $@)
rule_mkdir = $(if $(wildcard $(dir $@)),,@$(call echo-cmd,mkdir) $(cmd_mkdir))
# Compile command
-quiet_cmd_cc_o_c = CC $@
+quiet_cmd_cc_o_c = CC $@
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
-quiet_cmd_host_cc_o_c = HOSTCC $@
+quiet_cmd_host_cc_o_c = HOSTCC $@
cmd_host_cc_o_c = $(HOSTCC) $(host_c_flags) -c -o $@ $<
-quiet_cmd_cxx_o_c = CXX $@
+quiet_cmd_cxx_o_c = CXX $@
cmd_cxx_o_c = $(CXX) $(cxx_flags) -c -o $@ $<
-quiet_cmd_cpp_i_c = CPP $@
+quiet_cmd_cpp_i_c = CPP $@
cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
-quiet_cmd_cc_s_c = AS $@
+quiet_cmd_cc_s_c = AS $@
cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
-quiet_cmd_gen = GEN $@
+quiet_cmd_gen = GEN $@
# Link aggregate command
# If there's nothing to link, create empty $@ object.
-quiet_cmd_ld_multi = LD $@
+quiet_cmd_ld_multi = LD $@
cmd_ld_multi = $(if $(strip $(obj-y)),\
$(LD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
-quiet_cmd_host_ld_multi = HOSTLD $@
+quiet_cmd_host_ld_multi = HOSTLD $@
cmd_host_ld_multi = $(if $(strip $(obj-y)),\
$(HOSTLD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@)
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 7f475d59a097..87d112650dfb 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -22,7 +22,7 @@
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
#else
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
diff --git a/tools/include/linux/const.h b/tools/include/linux/const.h
index 81b8aae5a855..435ddd72d2c4 100644
--- a/tools/include/linux/const.h
+++ b/tools/include/linux/const.h
@@ -3,4 +3,12 @@
#include <vdso/const.h>
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
#endif /* _LINUX_CONST_H */
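A standalone illustration of what __is_constexpr() evaluates to, and of the
fact that its argument is never evaluated (a userspace sketch, assuming a
GCC-compatible compiler that accepts sizeof(void), which the macro relies on):

	#include <stdio.h>

	#define __is_constexpr(x) \
		(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

	int main(void)
	{
		int n = 4;

		printf("%d\n", (int)__is_constexpr(4));   /* 1: integer constant expression */
		printf("%d\n", (int)__is_constexpr(n));   /* 0: not a constant expression */
		printf("%d\n", (int)__is_constexpr(n++)); /* 0, argument not evaluated */
		printf("n = %d\n", n);                    /* still 4 */
		return 0;
	}

This is what lets GENMASK_INPUT_CHECK() in tools/include/linux/bits.h accept
non-constant arguments without tripping BUILD_BUG_ON_ZERO().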
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 6de5a7fc066b..d2a942086fcb 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index f44eb0a04afd..4c32e97dcdf0 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -185,7 +185,7 @@ struct fsxattr {
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
/*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
*/
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 7d6687618d80..d1b327036ae4 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -289,6 +289,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
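The value above decodes to the dotted quad 192.0.0.8 (0xc0 = 192, 0x00 = 0,
0x00 = 0, 0x08 = 8), the single address RFC 7600 reserves as a dummy source for
ICMP replies. A quick userspace check, illustrative only, with INADDR_DUMMY
copied from this header:

	#include <stdio.h>
	#include <arpa/inet.h>

	#define INADDR_DUMMY ((unsigned long int) 0xc0000008)

	int main(void)
	{
		struct in_addr a = { .s_addr = htonl(INADDR_DUMMY) };

		printf("%s\n", inet_ntoa(a));	/* prints 192.0.0.8 */
		return 0;
	}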
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 3fd9a7e9d90c..79d9c44d1ad7 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd {
* conversion after harvesting an entry. Also, it must not skip any
* dirty bits, so that dirty bits are always harvested in sequence.
*/
-#define KVM_DIRTY_GFN_F_DIRTY BIT(0)
-#define KVM_DIRTY_GFN_F_RESET BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
#define KVM_DIRTY_GFN_F_MASK 0x3
/*
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index bf8143505c49..f92880a15645 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -464,7 +464,7 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
- * siginfo_t::si_perf, e.g. to permit user to identify the event.
+ * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
*/
__u64 sig_data;
};
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e2a3cf437814..c41d9b2b59ac 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -3216,6 +3216,9 @@ static int add_dummy_ksym_var(struct btf *btf)
const struct btf_var_secinfo *vs;
const struct btf_type *sec;
+ if (!btf)
+ return 0;
+
sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
BTF_KIND_DATASEC);
if (sec_btf_id < 0)
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index ee426226928f..acbcf6c7bdf8 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -41,6 +41,11 @@
#define ELF_C_READ_MMAP ELF_C_READ
#endif
+/* Older libelf versions all end up with this expression, for both 32 and 64 bit */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 6061431ee04c..e9b619aa0cdf 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -1094,7 +1094,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
goto out_put_ctx;
}
if (xsk->fd == umem->fd)
- umem->rx_ring_setup_done = true;
+ umem->tx_ring_setup_done = true;
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index cedf3ede7545..bc821056aba9 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -19,6 +19,7 @@
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
+#include <objtool/endianness.h>
#include <arch/elf.h>
static int is_x86_64(const struct elf *elf)
@@ -683,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
sec = find_section_by_name(elf, ".altinstructions");
if (!sec) {
sec = elf_create_section(elf, ".altinstructions",
- SHF_WRITE, size, 0);
+ SHF_ALLOC, size, 0);
if (!sec) {
WARN_ELF("elf_create_section");
@@ -725,7 +726,7 @@ static int elf_add_alternative(struct elf *elf,
return -1;
}
- alt->cpuid = cpuid;
+ alt->cpuid = bswap_if_needed(cpuid);
alt->instrlen = orig_len;
alt->replacementlen = repl_len;
@@ -746,6 +747,10 @@ int arch_rewrite_retpolines(struct objtool_file *file)
list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+ if (insn->type != INSN_JUMP_DYNAMIC &&
+ insn->type != INSN_CALL_DYNAMIC)
+ continue;
+
if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
continue;
diff --git a/tools/objtool/arch/x86/include/arch/special.h b/tools/objtool/arch/x86/include/arch/special.h
index 14271cca0c74..f2918f789a0a 100644
--- a/tools/objtool/arch/x86/include/arch/special.h
+++ b/tools/objtool/arch/x86/include/arch/special.h
@@ -9,6 +9,7 @@
#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4
+#define JUMP_KEY_OFFSET 8
#define ALT_ENTRY_SIZE 12
#define ALT_ORIG_OFFSET 0
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9ed1a4cd00dc..e5947fbb9e7a 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1225,15 +1225,41 @@ static int handle_jump_alt(struct objtool_file *file,
struct instruction *orig_insn,
struct instruction **new_insn)
{
- if (orig_insn->type == INSN_NOP)
- return 0;
+ if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
+ orig_insn->type != INSN_NOP) {
- if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
WARN_FUNC("unsupported instruction at jump label",
orig_insn->sec, orig_insn->offset);
return -1;
}
+ if (special_alt->key_addend & 2) {
+ struct reloc *reloc = insn_reloc(file, orig_insn);
+
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+ elf_write_insn(file->elf, orig_insn->sec,
+ orig_insn->offset, orig_insn->len,
+ arch_nop_insn(orig_insn->len));
+ orig_insn->type = INSN_NOP;
+ }
+
+ if (orig_insn->type == INSN_NOP) {
+ if (orig_insn->len == 2)
+ file->jl_nop_short++;
+ else
+ file->jl_nop_long++;
+
+ return 0;
+ }
+
+ if (orig_insn->len == 2)
+ file->jl_short++;
+ else
+ file->jl_long++;
+
*new_insn = list_next_entry(orig_insn, list);
return 0;
}
@@ -1314,6 +1340,12 @@ static int add_special_section_alts(struct objtool_file *file)
free(special_alt);
}
+ if (stats) {
+ printf("jl\\\tNOP\tJMP\n");
+ printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
+ printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
+ }
+
out:
return ret;
}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index d08f5f3670f8..8676c7598728 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -9,6 +9,7 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -27,21 +28,27 @@ static inline u32 str_hash(const char *str)
return jhash(str, strlen(str), 0);
}
-static inline int elf_hash_bits(void)
-{
- return vmlinux ? ELF_HASH_BITS : 16;
-}
+#define __elf_table(name) (elf->name##_hash)
+#define __elf_bits(name) (elf->name##_bits)
-#define elf_hash_add(hashtable, node, key) \
- hlist_add_head(node, &hashtable[hash_min(key, elf_hash_bits())])
+#define elf_hash_add(name, node, key) \
+ hlist_add_head(node, &__elf_table(name)[hash_min(key, __elf_bits(name))])
-static void elf_hash_init(struct hlist_head *table)
-{
- __hash_init(table, 1U << elf_hash_bits());
-}
+#define elf_hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &__elf_table(name)[hash_min(key, __elf_bits(name))], member)
-#define elf_hash_for_each_possible(name, obj, member, key) \
- hlist_for_each_entry(obj, &name[hash_min(key, elf_hash_bits())], member)
+#define elf_alloc_hash(name, size) \
+({ \
+ __elf_bits(name) = max(10, ilog2(size)); \
+ __elf_table(name) = mmap(NULL, sizeof(struct hlist_head) << __elf_bits(name), \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANON, -1, 0); \
+ if (__elf_table(name) == (void *)-1L) { \
+ WARN("mmap fail " #name); \
+ __elf_table(name) = NULL; \
+ } \
+ __elf_table(name); \
+})
static bool symbol_to_offset(struct rb_node *a, const struct rb_node *b)
{
@@ -80,9 +87,10 @@ struct section *find_section_by_name(const struct elf *elf, const char *name)
{
struct section *sec;
- elf_hash_for_each_possible(elf->section_name_hash, sec, name_hash, str_hash(name))
+ elf_hash_for_each_possible(section_name, sec, name_hash, str_hash(name)) {
if (!strcmp(sec->name, name))
return sec;
+ }
return NULL;
}
@@ -92,9 +100,10 @@ static struct section *find_section_by_index(struct elf *elf,
{
struct section *sec;
- elf_hash_for_each_possible(elf->section_hash, sec, hash, idx)
+ elf_hash_for_each_possible(section, sec, hash, idx) {
if (sec->idx == idx)
return sec;
+ }
return NULL;
}
@@ -103,9 +112,10 @@ static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
{
struct symbol *sym;
- elf_hash_for_each_possible(elf->symbol_hash, sym, hash, idx)
+ elf_hash_for_each_possible(symbol, sym, hash, idx) {
if (sym->idx == idx)
return sym;
+ }
return NULL;
}
@@ -170,9 +180,10 @@ struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
{
struct symbol *sym;
- elf_hash_for_each_possible(elf->symbol_name_hash, sym, name_hash, str_hash(name))
+ elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
if (!strcmp(sym->name, name))
return sym;
+ }
return NULL;
}
@@ -189,8 +200,8 @@ struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *se
sec = sec->reloc;
for_offset_range(o, offset, offset + len) {
- elf_hash_for_each_possible(elf->reloc_hash, reloc, hash,
- sec_offset_hash(sec, o)) {
+ elf_hash_for_each_possible(reloc, reloc, hash,
+ sec_offset_hash(sec, o)) {
if (reloc->sec != sec)
continue;
@@ -228,6 +239,10 @@ static int read_sections(struct elf *elf)
return -1;
}
+ if (!elf_alloc_hash(section, sections_nr) ||
+ !elf_alloc_hash(section_name, sections_nr))
+ return -1;
+
for (i = 0; i < sections_nr; i++) {
sec = malloc(sizeof(*sec));
if (!sec) {
@@ -273,13 +288,18 @@ static int read_sections(struct elf *elf)
}
sec->len = sec->sh.sh_size;
+ if (sec->sh.sh_flags & SHF_EXECINSTR)
+ elf->text_size += sec->len;
+
list_add_tail(&sec->list, &elf->sections);
- elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
- elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf_hash_add(section, &sec->hash, sec->idx);
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
}
- if (stats)
+ if (stats) {
printf("nr_sections: %lu\n", (unsigned long)sections_nr);
+ printf("section_bits: %d\n", elf->section_bits);
+ }
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
@@ -308,8 +328,8 @@ static void elf_add_symbol(struct elf *elf, struct symbol *sym)
else
entry = &sym->sec->symbol_list;
list_add(&sym->list, entry);
- elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
- elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+ elf_hash_add(symbol, &sym->hash, sym->idx);
+ elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name));
/*
* Don't store empty STT_NOTYPE symbols in the rbtree. They
@@ -329,19 +349,25 @@ static int read_symbols(struct elf *elf)
Elf32_Word shndx;
symtab = find_section_by_name(elf, ".symtab");
- if (!symtab) {
+ if (symtab) {
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx)
+ shndx_data = symtab_shndx->data;
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+ } else {
/*
* A missing symbol table is actually possible if it's an empty
- * .o file. This can happen for thunk_64.o.
+ * .o file. This can happen for thunk_64.o. Make sure to at
+ * least allocate the symbol hash tables so we can do symbol
+ * lookups without crashing.
*/
- return 0;
+ symbols_nr = 0;
}
- symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
- if (symtab_shndx)
- shndx_data = symtab_shndx->data;
-
- symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+ if (!elf_alloc_hash(symbol, symbols_nr) ||
+ !elf_alloc_hash(symbol_name, symbols_nr))
+ return -1;
for (i = 0; i < symbols_nr; i++) {
sym = malloc(sizeof(*sym));
@@ -389,8 +415,10 @@ static int read_symbols(struct elf *elf)
elf_add_symbol(elf, sym);
}
- if (stats)
+ if (stats) {
printf("nr_symbols: %lu\n", (unsigned long)symbols_nr);
+ printf("symbol_bits: %d\n", elf->symbol_bits);
+ }
/* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) {
@@ -479,7 +507,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
reloc->addend = addend;
list_add_tail(&reloc->list, &sec->reloc->reloc_list);
- elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+ elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
sec->reloc->changed = true;
@@ -556,6 +584,9 @@ static int read_relocs(struct elf *elf)
unsigned int symndx;
unsigned long nr_reloc, max_reloc = 0, tot_reloc = 0;
+ if (!elf_alloc_hash(reloc, elf->text_size / 16))
+ return -1;
+
list_for_each_entry(sec, &elf->sections, list) {
if ((sec->sh.sh_type != SHT_RELA) &&
(sec->sh.sh_type != SHT_REL))
@@ -600,7 +631,7 @@ static int read_relocs(struct elf *elf)
}
list_add_tail(&reloc->list, &sec->reloc_list);
- elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
+ elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
nr_reloc++;
}
@@ -611,6 +642,7 @@ static int read_relocs(struct elf *elf)
if (stats) {
printf("max_reloc: %lu\n", max_reloc);
printf("tot_reloc: %lu\n", tot_reloc);
+ printf("reloc_bits: %d\n", elf->reloc_bits);
}
return 0;
@@ -632,12 +664,6 @@ struct elf *elf_open_read(const char *name, int flags)
INIT_LIST_HEAD(&elf->sections);
- elf_hash_init(elf->symbol_hash);
- elf_hash_init(elf->symbol_name_hash);
- elf_hash_init(elf->section_hash);
- elf_hash_init(elf->section_name_hash);
- elf_hash_init(elf->reloc_hash);
-
elf->fd = open(name, flags);
if (elf->fd == -1) {
fprintf(stderr, "objtool: Can't open '%s': %s\n",
@@ -717,7 +743,7 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
{
- struct section *symtab;
+ struct section *symtab, *symtab_shndx;
struct symbol *sym;
Elf_Data *data;
Elf_Scn *s;
@@ -762,12 +788,36 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
data->d_buf = &sym->sym;
data->d_size = sizeof(sym->sym);
data->d_align = 1;
+ data->d_type = ELF_T_SYM;
sym->idx = symtab->len / sizeof(sym->sym);
symtab->len += data->d_size;
symtab->changed = true;
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx) {
+ s = elf_getscn(elf->elf, symtab_shndx->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return NULL;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return NULL;
+ }
+
+ data->d_buf = &sym->sym.st_size; /* conveniently 0 */
+ data->d_size = sizeof(Elf32_Word);
+ data->d_align = 4;
+ data->d_type = ELF_T_WORD;
+
+ symtab_shndx->len += 4;
+ symtab_shndx->changed = true;
+ }
+
sym->sec = find_section_by_index(elf, 0);
elf_add_symbol(elf, sym);
@@ -850,8 +900,8 @@ struct section *elf_create_section(struct elf *elf, const char *name,
return NULL;
list_add_tail(&sec->list, &elf->sections);
- elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
- elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf_hash_add(section, &sec->hash, sec->idx);
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
elf->changed = true;
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 45e5ede363b0..e34395047530 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -83,12 +83,20 @@ struct elf {
int fd;
bool changed;
char *name;
+ unsigned int text_size;
struct list_head sections;
- DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(symbol_name_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(section_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(section_name_hash, ELF_HASH_BITS);
- DECLARE_HASHTABLE(reloc_hash, ELF_HASH_BITS);
+
+ int symbol_bits;
+ int symbol_name_bits;
+ int section_bits;
+ int section_name_bits;
+ int reloc_bits;
+
+ struct hlist_head *symbol_hash;
+ struct hlist_head *symbol_name_hash;
+ struct hlist_head *section_hash;
+ struct hlist_head *section_name_hash;
+ struct hlist_head *reloc_hash;
};
#define OFFSET_STRIDE_BITS 4
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index e4084afb2304..24fa83634de4 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -22,6 +22,9 @@ struct objtool_file {
struct list_head static_call_list;
struct list_head mcount_loc_list;
bool ignore_unreachables, c_file, hints, rodata;
+
+ unsigned long jl_short, jl_long;
+ unsigned long jl_nop_short, jl_nop_long;
};
struct objtool_file *objtool_open_read(const char *_objname);
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index 8a09f4e9d480..dc4721e19002 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -27,6 +27,7 @@ struct special_alt {
unsigned long new_off;
unsigned int orig_len, new_len; /* group only */
+ u8 key_addend;
};
int special_get_alts(struct elf *elf, struct list_head *alts);
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 07b21cfabf5c..bc925cf19e2d 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -23,6 +23,7 @@ struct special_entry {
unsigned char size, orig, new;
unsigned char orig_len, new_len; /* group only */
unsigned char feature; /* ALTERNATIVE macro CPU feature */
+ unsigned char key; /* jump_label key */
};
struct special_entry entries[] = {
@@ -42,6 +43,7 @@ struct special_entry entries[] = {
.size = JUMP_ENTRY_SIZE,
.orig = JUMP_ORIG_OFFSET,
.new = JUMP_NEW_OFFSET,
+ .key = JUMP_KEY_OFFSET,
},
{
.sec = "__ex_table",
@@ -122,6 +124,18 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
alt->new_off -= 0x7ffffff0;
}
+ if (entry->key) {
+ struct reloc *key_reloc;
+
+ key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
+ if (!key_reloc) {
+ WARN_FUNC("can't find key reloc",
+ sec, offset + entry->key);
+ return -1;
+ }
+ alt->key_addend = key_reloc->addend;
+ }
+
return 0;
}
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 1dcec73c910c..bcf3eca5afbe 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -108,9 +108,9 @@ displayed as follows:
perf script --itrace=ibxwpe -F+flags
-The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
-system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
-in transaction, respectively.
+The flags are "bcrosyiABExgh" which stand for branch, call, return, conditional,
+system, asynchronous, interrupt, transaction abort, trace begin, trace end,
+in transaction, VM-entry, and VM-exit, respectively.
perf script also supports higher level ways to dump instruction traces:
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 5b8b61075039..48a5f5b26dd4 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -183,14 +183,15 @@ OPTIONS
At this point usage is displayed, and perf-script exits.
The flags field is synthesized and may have a value when Instruction
- Trace decoding. The flags are "bcrosyiABEx" which stand for branch,
+ Trace decoding. The flags are "bcrosyiABExgh" which stand for branch,
call, return, conditional, system, asynchronous, interrupt,
- transaction abort, trace begin, trace end, and in transaction,
+ transaction abort, trace begin, trace end, in transaction, VM-Entry, and VM-Exit,
respectively. Known combinations of flags are printed more nicely e.g.
"call" for "bc", "return" for "br", "jcc" for "bo", "jmp" for "b",
"int" for "bci", "iret" for "bri", "syscall" for "bcs", "sysret" for "brs",
"async" for "by", "hw int" for "bcyi", "tx abrt" for "bA", "tr strt" for "bB",
- "tr end" for "bE". However the "x" flag will be display separately in those
+ "tr end" for "bE", "vmentry" for "bcg", "vmexit" for "bch".
+ However, the "x" flag will be displayed separately in those
cases e.g. "jcc (x)" for a condition branch within a transaction.
The callindent field is synthesized and may have a value when
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 406a9519145e..73df23dd664c 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -90,7 +90,6 @@ endif
ifeq ($(ARCH),mips)
NO_PERF_REGS := 0
CFLAGS += -I$(OUTPUT)arch/mips/include/generated
- CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
LIBUNWIND_LIBS = -lunwind -lunwind-mips
endif
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 9974f5f8e49b..9cd1c34f31b5 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-443 n64 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 2e68fbb57cc6..8f052ff4058c 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 7e4a2aba366d..0690263df1dd 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index ecd551b08d05..ce18119ea0d0 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,7 +364,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 87f5b1a4a7fa..833405c27dae 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -80,6 +80,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (!perf_header__has_feat(&session->header, HEADER_BUILD_ID))
with_hits = true;
+ if (zstd_init(&(session->zstd_data), 0) < 0)
+ pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
/*
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3337b5f93336..84803abeb942 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -2714,6 +2714,12 @@ int cmd_record(int argc, const char **argv)
rec->no_buildid = true;
}
+ if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+ pr_err("Kernel has no cgroup sampling support.\n");
+ err = -EINVAL;
+ goto out_opts;
+ }
+
if (rec->opts.kcore)
rec->data.is_dir = true;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5a830ae09418..f9f74a514315 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -572,7 +572,8 @@ static int enable_counters(void)
* - we have initial delay configured
*/
if (!target__none(&target) || stat_config.initial_delay) {
- evlist__enable(evsel_list);
+ if (!all_counters_use_bpf)
+ evlist__enable(evsel_list);
if (stat_config.initial_delay > 0)
pr_info(EVLIST_ENABLED_MSG);
}
@@ -581,13 +582,19 @@ static int enable_counters(void)
static void disable_counters(void)
{
+ struct evsel *counter;
+
/*
* If we don't have tracee (attaching to task or cpu), counters may
* still be running. To get accurate group ratios, we must stop groups
* from counting before reading their constituent counters.
*/
- if (!target__none(&target))
- evlist__disable(evsel_list);
+ if (!target__none(&target)) {
+ evlist__for_each_entry(evsel_list, counter)
+ bpf_counter__disable(counter);
+ if (!all_counters_use_bpf)
+ evlist__disable(evsel_list);
+ }
}
static volatile int workload_exec_errno;
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index dd8ff287e930..c783558332b8 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
arch/arm/include/uapi/asm/perf_regs.h
arch/arm64/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
arch/powerpc/include/uapi/asm/perf_regs.h
arch/s390/include/uapi/asm/perf_regs.h
arch/x86/include/uapi/asm/perf_regs.h
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 20cb91ef06ff..2f6b67189b42 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -443,6 +443,8 @@ int main(int argc, const char **argv)
const char *cmd;
char sbuf[STRERR_BUFSIZE];
+ perf_debug_setup();
+
/* libsubcmd init */
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
pager_init(PERF_PAGER_ENVIRONMENT);
@@ -531,8 +533,6 @@ int main(int argc, const char **argv)
*/
pthread__block_sigwinch();
- perf_debug_setup();
-
while (1) {
static int done_help;
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
index 616f29098c71..605be14f441c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -1,46 +1,56 @@
[
{
- "EventCode": "1003C",
+ "EventCode": "0x1003C",
"EventName": "PM_EXEC_STALL_DMISS_L2L3",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
},
{
- "EventCode": "34056",
+ "EventCode": "0x1E054",
+ "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+ },
+ {
+ "EventCode": "0x34054",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+ },
+ {
+ "EventCode": "0x34056",
"EventName": "PM_EXEC_STALL_LOAD_FINISH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
},
{
- "EventCode": "3006C",
+ "EventCode": "0x3006C",
"EventName": "PM_RUN_CYC_SMT2_MODE",
"BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
},
{
- "EventCode": "300F4",
+ "EventCode": "0x300F4",
"EventName": "PM_RUN_INST_CMPL_CONC",
"BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
},
{
- "EventCode": "4C016",
+ "EventCode": "0x4C016",
"EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
},
{
- "EventCode": "4D014",
+ "EventCode": "0x4D014",
"EventName": "PM_EXEC_STALL_LOAD",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
},
{
- "EventCode": "4D016",
+ "EventCode": "0x4D016",
"EventName": "PM_EXEC_STALL_PTESYNC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
},
{
- "EventCode": "401EA",
+ "EventCode": "0x401EA",
"EventName": "PM_THRESH_EXC_128",
"BriefDescription": "Threshold counter exceeded a value of 128."
},
{
- "EventCode": "400F6",
+ "EventCode": "0x400F6",
"EventName": "PM_BR_MPRED_CMPL",
"BriefDescription": "A mispredicted branch completed. Includes direction and target."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
index 703cd431ae5b..54acb55e2c8c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
@@ -1,6 +1,6 @@
[
{
- "EventCode": "4016E",
+ "EventCode": "0x4016E",
"EventName": "PM_THRESH_NOT_MET",
"BriefDescription": "Threshold counter did not meet threshold."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
index eac8609dcc90..558f9530f54e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -1,216 +1,246 @@
[
{
- "EventCode": "10004",
+ "EventCode": "0x10004",
"EventName": "PM_EXEC_STALL_TRANSLATION",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
},
{
- "EventCode": "10010",
+ "EventCode": "0x10006",
+ "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+ },
+ {
+ "EventCode": "0x10010",
"EventName": "PM_PMC4_OVERFLOW",
"BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
},
{
- "EventCode": "10020",
+ "EventCode": "0x10020",
"EventName": "PM_PMC4_REWIND",
"BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
},
{
- "EventCode": "10038",
+ "EventCode": "0x10038",
"EventName": "PM_DISP_STALL_TRANSLATION",
"BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
},
{
- "EventCode": "1003A",
+ "EventCode": "0x1003A",
"EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
},
{
- "EventCode": "1E050",
+ "EventCode": "0x1D05E",
+ "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+ },
+ {
+ "EventCode": "0x1E050",
"EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
},
{
- "EventCode": "1F054",
+ "EventCode": "0x1F054",
"EventName": "PM_DTLB_HIT",
"BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
},
{
- "EventCode": "101E8",
+ "EventCode": "0x10064",
+ "EventName": "PM_DISP_STALL_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ },
+ {
+ "EventCode": "0x101E8",
"EventName": "PM_THRESH_EXC_256",
"BriefDescription": "Threshold counter exceeded a count of 256."
},
{
- "EventCode": "101EC",
+ "EventCode": "0x101EC",
"EventName": "PM_THRESH_MET",
"BriefDescription": "Threshold exceeded."
},
{
- "EventCode": "100F2",
+ "EventCode": "0x100F2",
"EventName": "PM_1PLUS_PPC_CMPL",
"BriefDescription": "Cycles in which at least one instruction is completed by this thread."
},
{
- "EventCode": "100F6",
+ "EventCode": "0x100F6",
"EventName": "PM_IERAT_MISS",
"BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
},
{
- "EventCode": "100F8",
+ "EventCode": "0x100F8",
"EventName": "PM_DISP_STALL_CYC",
"BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
},
{
- "EventCode": "20114",
+ "EventCode": "0x20006",
+ "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+ },
+ {
+ "EventCode": "0x20114",
"EventName": "PM_MRK_L2_RC_DISP",
"BriefDescription": "Marked instruction RC dispatched in L2."
},
{
- "EventCode": "2C010",
+ "EventCode": "0x2C010",
"EventName": "PM_EXEC_STALL_LSU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
},
{
- "EventCode": "2C016",
+ "EventCode": "0x2C016",
"EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
"BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
},
{
- "EventCode": "2C01E",
+ "EventCode": "0x2C01E",
"EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
},
{
- "EventCode": "2D01A",
+ "EventCode": "0x2D01A",
"EventName": "PM_DISP_STALL_IC_MISS",
"BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
},
{
- "EventCode": "2D01C",
- "EventName": "PM_CMPL_STALL_STCX",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
- },
- {
- "EventCode": "2E018",
+ "EventCode": "0x2E018",
"EventName": "PM_DISP_STALL_FETCH",
"BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
},
{
- "EventCode": "2E01A",
+ "EventCode": "0x2E01A",
"EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
},
{
- "EventCode": "2C142",
+ "EventCode": "0x2C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "24050",
+ "EventCode": "0x24050",
"EventName": "PM_IOPS_DISP",
"BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction."
},
{
- "EventCode": "2405E",
+ "EventCode": "0x2405E",
"EventName": "PM_ISSUE_CANCEL",
"BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction."
},
{
- "EventCode": "200FA",
+ "EventCode": "0x200FA",
"EventName": "PM_BR_TAKEN_CMPL",
"BriefDescription": "Branch Taken instruction completed."
},
{
- "EventCode": "30012",
+ "EventCode": "0x30004",
+ "EventName": "PM_DISP_STALL_FLUSH",
+ "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+ },
+ {
+ "EventCode": "0x3000A",
+ "EventName": "PM_DISP_STALL_ITLB_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
+ },
+ {
+ "EventCode": "0x30012",
"EventName": "PM_FLUSH_COMPLETION",
"BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
},
{
- "EventCode": "30014",
+ "EventCode": "0x30014",
"EventName": "PM_EXEC_STALL_STORE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
},
{
- "EventCode": "30018",
+ "EventCode": "0x30018",
"EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
},
{
- "EventCode": "30026",
+ "EventCode": "0x30026",
"EventName": "PM_EXEC_STALL_STORE_MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
},
{
- "EventCode": "3012A",
+ "EventCode": "0x3012A",
"EventName": "PM_MRK_L2_RC_DONE",
"BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
},
{
- "EventCode": "3F046",
+ "EventCode": "0x3F046",
"EventName": "PM_ITLB_HIT_1G",
"BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "34058",
+ "EventCode": "0x34058",
"EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
"BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
},
{
- "EventCode": "3D05C",
+ "EventCode": "0x3D05C",
"EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
},
{
- "EventCode": "3E052",
+ "EventCode": "0x3E052",
"EventName": "PM_DISP_STALL_IC_L3",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
},
{
- "EventCode": "3E054",
+ "EventCode": "0x3E054",
"EventName": "PM_LD_MISS_L1",
"BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
},
{
- "EventCode": "301EA",
+ "EventCode": "0x301EA",
"EventName": "PM_THRESH_EXC_1024",
"BriefDescription": "Threshold counter exceeded a value of 1024."
},
{
- "EventCode": "300FA",
+ "EventCode": "0x300FA",
"EventName": "PM_INST_FROM_L3MISS",
"BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
},
{
- "EventCode": "40006",
+ "EventCode": "0x40006",
"EventName": "PM_ISSUE_KILL",
"BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
},
{
- "EventCode": "40116",
+ "EventCode": "0x40116",
"EventName": "PM_MRK_LARX_FIN",
"BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "4C010",
+ "EventCode": "0x4C010",
"EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
},
{
- "EventCode": "4D01E",
+ "EventCode": "0x4D01E",
"EventName": "PM_DISP_STALL_BR_MPRED",
"BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
},
{
- "EventCode": "4E010",
+ "EventCode": "0x4E010",
"EventName": "PM_DISP_STALL_IC_L3MISS",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
},
{
- "EventCode": "4E01A",
+ "EventCode": "0x4E01A",
"EventName": "PM_DISP_STALL_HELD_CYC",
"BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
},
{
- "EventCode": "44056",
+ "EventCode": "0x4003C",
+ "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ },
+ {
+ "EventCode": "0x44056",
"EventName": "PM_VECTOR_ST_CMPL",
"BriefDescription": "Vector store instructions completed."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/locks.json b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
index 016d8de0e14a..b5a0d6521963 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/locks.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
@@ -1,11 +1,11 @@
[
{
- "EventCode": "1E058",
+ "EventCode": "0x1E058",
"EventName": "PM_STCX_FAIL_FIN",
"BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "4E050",
+ "EventCode": "0x4E050",
"EventName": "PM_STCX_PASS_FIN",
"BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
index 93a5a5910648..58b5dfe3a273 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
@@ -1,146 +1,141 @@
[
{
- "EventCode": "1002C",
+ "EventCode": "0x1002C",
"EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
"BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
},
{
- "EventCode": "10132",
+ "EventCode": "0x10132",
"EventName": "PM_MRK_INST_ISSUED",
"BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
},
{
- "EventCode": "101E0",
+ "EventCode": "0x101E0",
"EventName": "PM_MRK_INST_DISP",
"BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
},
{
- "EventCode": "101E2",
+ "EventCode": "0x101E2",
"EventName": "PM_MRK_BR_TAKEN_CMPL",
"BriefDescription": "Marked Branch Taken instruction completed."
},
{
- "EventCode": "20112",
+ "EventCode": "0x20112",
"EventName": "PM_MRK_NTF_FIN",
"BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
},
{
- "EventCode": "2C01C",
+ "EventCode": "0x2C01C",
"EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
},
{
- "EventCode": "20138",
+ "EventCode": "0x20138",
"EventName": "PM_MRK_ST_NEST",
"BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2."
},
{
- "EventCode": "2013A",
+ "EventCode": "0x2013A",
"EventName": "PM_MRK_BRU_FIN",
"BriefDescription": "Marked Branch instruction finished."
},
{
- "EventCode": "2C144",
+ "EventCode": "0x2C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
},
{
- "EventCode": "24156",
+ "EventCode": "0x24156",
"EventName": "PM_MRK_STCX_FIN",
"BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "24158",
+ "EventCode": "0x24158",
"EventName": "PM_MRK_INST",
"BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens."
},
{
- "EventCode": "2415C",
+ "EventCode": "0x2415C",
"EventName": "PM_MRK_BR_CMPL",
"BriefDescription": "A marked branch completed. All branches are included."
},
{
- "EventCode": "200FD",
+ "EventCode": "0x200FD",
"EventName": "PM_L1_ICACHE_MISS",
"BriefDescription": "Demand iCache Miss."
},
{
- "EventCode": "30130",
+ "EventCode": "0x30130",
"EventName": "PM_MRK_INST_FIN",
"BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
},
{
- "EventCode": "34146",
+ "EventCode": "0x34146",
"EventName": "PM_MRK_LD_CMPL",
"BriefDescription": "Marked loads completed."
},
{
- "EventCode": "3E158",
+ "EventCode": "0x3E158",
"EventName": "PM_MRK_STCX_FAIL",
"BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "3E15A",
+ "EventCode": "0x3E15A",
"EventName": "PM_MRK_ST_FIN",
"BriefDescription": "The marked instruction was a store of any kind."
},
{
- "EventCode": "30068",
+ "EventCode": "0x30068",
"EventName": "PM_L1_ICACHE_RELOADED_PREF",
"BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
},
{
- "EventCode": "301E4",
+ "EventCode": "0x301E4",
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
},
{
- "EventCode": "300F6",
+ "EventCode": "0x300F6",
"EventName": "PM_LD_DEMAND_MISS_L1",
"BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
},
{
- "EventCode": "300FE",
+ "EventCode": "0x300FE",
"EventName": "PM_DATA_FROM_L3MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
},
{
- "EventCode": "40012",
+ "EventCode": "0x40012",
"EventName": "PM_L1_ICACHE_RELOADED_ALL",
"BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
},
{
- "EventCode": "40134",
+ "EventCode": "0x40134",
"EventName": "PM_MRK_INST_TIMEO",
"BriefDescription": "Marked instruction finish timeout (instruction was lost)."
},
{
- "EventCode": "4003C",
- "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
- },
- {
- "EventCode": "4505A",
+ "EventCode": "0x4505A",
"EventName": "PM_SP_FLOP_CMPL",
"BriefDescription": "Single Precision floating point instructions completed."
},
{
- "EventCode": "4D058",
+ "EventCode": "0x4D058",
"EventName": "PM_VECTOR_FLOP_CMPL",
"BriefDescription": "Vector floating point instructions completed."
},
{
- "EventCode": "4D05A",
+ "EventCode": "0x4D05A",
"EventName": "PM_NON_MATH_FLOP_CMPL",
"BriefDescription": "Non Math instructions completed."
},
{
- "EventCode": "401E0",
+ "EventCode": "0x401E0",
"EventName": "PM_MRK_INST_CMPL",
"BriefDescription": "marked instruction completed."
},
{
- "EventCode": "400FE",
+ "EventCode": "0x400FE",
"EventName": "PM_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
index b01141eeebee..843b51f531e9 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -1,191 +1,186 @@
[
{
- "EventCode": "1000A",
+ "EventCode": "0x1000A",
"EventName": "PM_PMC3_REWIND",
"BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
},
{
- "EventCode": "1C040",
+ "EventCode": "0x1C040",
"EventName": "PM_XFER_FROM_SRC_PMC1",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "1C142",
+ "EventCode": "0x1C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "1C144",
+ "EventCode": "0x1C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
},
{
- "EventCode": "1C056",
+ "EventCode": "0x1C056",
"EventName": "PM_DERAT_MISS_4K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "1C058",
+ "EventCode": "0x1C058",
"EventName": "PM_DTLB_MISS_16G",
"BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "1C05C",
+ "EventCode": "0x1C05C",
"EventName": "PM_DTLB_MISS_2M",
"BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "1E056",
+ "EventCode": "0x1E056",
"EventName": "PM_EXEC_STALL_STORE_PIPE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
},
{
- "EventCode": "1F150",
+ "EventCode": "0x1F150",
"EventName": "PM_MRK_ST_L2_CYC",
"BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
},
{
- "EventCode": "10062",
+ "EventCode": "0x10062",
"EventName": "PM_LD_L3MISS_PEND_CYC",
"BriefDescription": "Cycles L3 miss was pending for this thread."
},
{
- "EventCode": "20010",
+ "EventCode": "0x20010",
"EventName": "PM_PMC1_OVERFLOW",
"BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
},
{
- "EventCode": "2001A",
+ "EventCode": "0x2001A",
"EventName": "PM_ITLB_HIT",
"BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "2003E",
+ "EventCode": "0x2003E",
"EventName": "PM_PTESYNC_FIN",
"BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time."
},
{
- "EventCode": "2C040",
+ "EventCode": "0x2C040",
"EventName": "PM_XFER_FROM_SRC_PMC2",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "2C054",
+ "EventCode": "0x2C054",
"EventName": "PM_DERAT_MISS_64K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "2C056",
+ "EventCode": "0x2C056",
"EventName": "PM_DTLB_MISS_4K",
"BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "2D154",
+ "EventCode": "0x2D154",
"EventName": "PM_MRK_DERAT_MISS_64K",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "200F6",
+ "EventCode": "0x200F6",
"EventName": "PM_DERAT_MISS",
"BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "3000A",
- "EventName": "PM_DISP_STALL_ITLB_MISS",
- "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
- },
- {
- "EventCode": "30016",
+ "EventCode": "0x30016",
"EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
},
{
- "EventCode": "3C040",
+ "EventCode": "0x3C040",
"EventName": "PM_XFER_FROM_SRC_PMC3",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "3C142",
+ "EventCode": "0x3C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "3C144",
+ "EventCode": "0x3C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
},
{
- "EventCode": "3C054",
+ "EventCode": "0x3C054",
"EventName": "PM_DERAT_MISS_16M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "3C056",
+ "EventCode": "0x3C056",
"EventName": "PM_DTLB_MISS_64K",
"BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "3C058",
+ "EventCode": "0x3C058",
"EventName": "PM_LARX_FIN",
"BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "301E2",
+ "EventCode": "0x301E2",
"EventName": "PM_MRK_ST_CMPL",
"BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
},
{
- "EventCode": "300FC",
+ "EventCode": "0x300FC",
"EventName": "PM_DTLB_MISS",
"BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
},
{
- "EventCode": "4D02C",
+ "EventCode": "0x4D02C",
"EventName": "PM_PMC1_REWIND",
"BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
},
{
- "EventCode": "4003E",
+ "EventCode": "0x4003E",
"EventName": "PM_LD_CMPL",
"BriefDescription": "Loads completed."
},
{
- "EventCode": "4C040",
+ "EventCode": "0x4C040",
"EventName": "PM_XFER_FROM_SRC_PMC4",
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "4C142",
+ "EventCode": "0x4C142",
"EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
"BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
- "EventCode": "4C144",
+ "EventCode": "0x4C144",
"EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
"BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
},
{
- "EventCode": "4C056",
+ "EventCode": "0x4C056",
"EventName": "PM_DTLB_MISS_16M",
"BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "4C05A",
+ "EventCode": "0x4C05A",
"EventName": "PM_DTLB_MISS_1G",
"BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "4C15E",
+ "EventCode": "0x4C15E",
"EventName": "PM_MRK_DTLB_MISS_64K",
"BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "4D056",
+ "EventCode": "0x4D056",
"EventName": "PM_NON_FMA_FLOP_CMPL",
"BriefDescription": "Non FMA instruction completed."
},
{
- "EventCode": "40164",
+ "EventCode": "0x40164",
"EventName": "PM_MRK_DERAT_MISS_2M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
index a119e56cbf1c..7d0de1a2860b 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -1,296 +1,271 @@
[
{
- "EventCode": "10016",
+ "EventCode": "0x10016",
"EventName": "PM_VSU0_ISSUE",
"BriefDescription": "VSU instructions issued to VSU pipe 0."
},
{
- "EventCode": "1001C",
+ "EventCode": "0x1001C",
"EventName": "PM_ULTRAVISOR_INST_CMPL",
"BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
},
{
- "EventCode": "100F0",
+ "EventCode": "0x100F0",
"EventName": "PM_CYC",
"BriefDescription": "Processor cycles."
},
{
- "EventCode": "10134",
+ "EventCode": "0x10134",
"EventName": "PM_MRK_ST_DONE_L2",
"BriefDescription": "Marked stores completed in L2 (RC machine done)."
},
{
- "EventCode": "1505E",
+ "EventCode": "0x1505E",
"EventName": "PM_LD_HIT_L1",
"BriefDescription": "Loads that finished without experiencing an L1 miss."
},
{
- "EventCode": "1D05E",
- "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
- },
- {
- "EventCode": "1E054",
- "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
- },
- {
- "EventCode": "1E05A",
- "EventName": "PM_CMPL_STALL_LWSYNC",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
- },
- {
- "EventCode": "1F056",
+ "EventCode": "0x1F056",
"EventName": "PM_DISP_SS0_2_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
},
{
- "EventCode": "1F15C",
+ "EventCode": "0x1F15C",
"EventName": "PM_MRK_STCX_L2_CYC",
"BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
},
{
- "EventCode": "10066",
+ "EventCode": "0x10066",
"EventName": "PM_ADJUNCT_CYC",
"BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
},
{
- "EventCode": "101E4",
+ "EventCode": "0x101E4",
"EventName": "PM_MRK_L1_ICACHE_MISS",
"BriefDescription": "Marked Instruction suffered an icache Miss."
},
{
- "EventCode": "101EA",
+ "EventCode": "0x101EA",
"EventName": "PM_MRK_L1_RELOAD_VALID",
"BriefDescription": "Marked demand reload."
},
{
- "EventCode": "100F4",
+ "EventCode": "0x100F4",
"EventName": "PM_FLOP_CMPL",
"BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
},
{
- "EventCode": "100FA",
+ "EventCode": "0x100FA",
"EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
"BriefDescription": "Cycles when at least one thread has the run latch set."
},
{
- "EventCode": "100FC",
+ "EventCode": "0x100FC",
"EventName": "PM_LD_REF_L1",
"BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
},
{
- "EventCode": "20006",
- "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
- },
- {
- "EventCode": "2000C",
+ "EventCode": "0x2000C",
"EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
"BriefDescription": "Cycles when the run latch is set for all threads."
},
{
- "EventCode": "2E010",
+ "EventCode": "0x2E010",
"EventName": "PM_ADJUNCT_INST_CMPL",
"BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
},
{
- "EventCode": "2E014",
+ "EventCode": "0x2E014",
"EventName": "PM_STCX_FIN",
"BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
},
{
- "EventCode": "20130",
+ "EventCode": "0x20130",
"EventName": "PM_MRK_INST_DECODED",
"BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
},
{
- "EventCode": "20132",
+ "EventCode": "0x20132",
"EventName": "PM_MRK_DFU_ISSUE",
"BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
},
{
- "EventCode": "20134",
+ "EventCode": "0x20134",
"EventName": "PM_MRK_FXU_ISSUE",
"BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
},
{
- "EventCode": "2505C",
+ "EventCode": "0x2505C",
"EventName": "PM_VSU_ISSUE",
"BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
},
{
- "EventCode": "2F054",
+ "EventCode": "0x2F054",
"EventName": "PM_DISP_SS1_2_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
},
{
- "EventCode": "2F056",
+ "EventCode": "0x2F056",
"EventName": "PM_DISP_SS1_4_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
},
{
- "EventCode": "2006C",
+ "EventCode": "0x2006C",
"EventName": "PM_RUN_CYC_SMT4_MODE",
"BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
},
{
- "EventCode": "201E0",
+ "EventCode": "0x201E0",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
},
{
- "EventCode": "201E4",
+ "EventCode": "0x201E4",
"EventName": "PM_MRK_DATA_FROM_L3MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
},
{
- "EventCode": "201E8",
+ "EventCode": "0x201E8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512."
},
{
- "EventCode": "200F2",
+ "EventCode": "0x200F2",
"EventName": "PM_INST_DISP",
"BriefDescription": "PowerPC instructions dispatched."
},
{
- "EventCode": "30132",
+ "EventCode": "0x30132",
"EventName": "PM_MRK_VSU_FIN",
"BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
},
{
- "EventCode": "30038",
+ "EventCode": "0x30038",
"EventName": "PM_EXEC_STALL_DMISS_LMEM",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
},
{
- "EventCode": "3F04A",
+ "EventCode": "0x3F04A",
"EventName": "PM_LSU_ST5_FIN",
"BriefDescription": "LSU Finished an internal operation in ST2 port."
},
{
- "EventCode": "34054",
- "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
- },
- {
- "EventCode": "3405A",
+ "EventCode": "0x3405A",
"EventName": "PM_PRIVILEGED_INST_CMPL",
"BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
},
{
- "EventCode": "3F150",
+ "EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_CYC",
"BriefDescription": "cycles to drain st from core to L2."
},
{
- "EventCode": "3F054",
+ "EventCode": "0x3F054",
"EventName": "PM_DISP_SS0_4_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
},
{
- "EventCode": "3F056",
+ "EventCode": "0x3F056",
"EventName": "PM_DISP_SS0_8_INSTR_CYC",
"BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
},
{
- "EventCode": "30162",
+ "EventCode": "0x30162",
"EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
"BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
},
{
- "EventCode": "40114",
+ "EventCode": "0x40114",
"EventName": "PM_MRK_START_PROBE_NOP_DISP",
"BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
},
{
- "EventCode": "4001C",
+ "EventCode": "0x4001C",
"EventName": "PM_VSU_FIN",
"BriefDescription": "VSU instructions finished."
},
{
- "EventCode": "4C01A",
+ "EventCode": "0x4C01A",
"EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
},
{
- "EventCode": "4D012",
+ "EventCode": "0x4D012",
"EventName": "PM_PMC3_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
},
{
- "EventCode": "4D022",
+ "EventCode": "0x4D022",
"EventName": "PM_HYPERVISOR_INST_CMPL",
"BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
},
{
- "EventCode": "4D026",
+ "EventCode": "0x4D026",
"EventName": "PM_ULTRAVISOR_CYC",
"BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
},
{
- "EventCode": "4D028",
+ "EventCode": "0x4D028",
"EventName": "PM_PRIVILEGED_CYC",
"BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
},
{
- "EventCode": "40030",
+ "EventCode": "0x40030",
"EventName": "PM_INST_FIN",
"BriefDescription": "Instructions finished."
},
{
- "EventCode": "44146",
+ "EventCode": "0x44146",
"EventName": "PM_MRK_STCX_CORE_CYC",
"BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
},
{
- "EventCode": "44054",
+ "EventCode": "0x44054",
"EventName": "PM_VECTOR_LD_CMPL",
"BriefDescription": "Vector load instructions completed."
},
{
- "EventCode": "45054",
+ "EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
"BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
},
{
- "EventCode": "45056",
+ "EventCode": "0x45056",
"EventName": "PM_SCALAR_FLOP_CMPL",
"BriefDescription": "Scalar floating point instructions completed."
},
{
- "EventCode": "4505C",
+ "EventCode": "0x4505C",
"EventName": "PM_MATH_FLOP_CMPL",
"BriefDescription": "Math floating point instructions completed."
},
{
- "EventCode": "4D05E",
+ "EventCode": "0x4D05E",
"EventName": "PM_BR_CMPL",
"BriefDescription": "A branch completed. All branches are included."
},
{
- "EventCode": "4E15E",
+ "EventCode": "0x4E15E",
"EventName": "PM_MRK_INST_FLUSHED",
"BriefDescription": "The marked instruction was flushed."
},
{
- "EventCode": "401E6",
+ "EventCode": "0x401E6",
"EventName": "PM_MRK_INST_FROM_L3MISS",
"BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
},
{
- "EventCode": "401E8",
+ "EventCode": "0x401E8",
"EventName": "PM_MRK_DATA_FROM_L2MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
},
{
- "EventCode": "400F0",
+ "EventCode": "0x400F0",
"EventName": "PM_LD_DEMAND_MISS_L1_FIN",
"BriefDescription": "Load Missed L1, counted at finish time."
},
{
- "EventCode": "400FA",
+ "EventCode": "0x400FA",
"EventName": "PM_RUN_INST_CMPL",
"BriefDescription": "Completed PowerPC instructions gated by the run latch."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
index b61b5cc157ee..b8aded6045fa 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -1,296 +1,291 @@
[
{
- "EventCode": "100FE",
+ "EventCode": "0x100FE",
"EventName": "PM_INST_CMPL",
"BriefDescription": "PowerPC instructions completed."
},
{
- "EventCode": "10006",
- "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
- "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
- },
- {
- "EventCode": "1000C",
+ "EventCode": "0x1000C",
"EventName": "PM_LSU_LD0_FIN",
"BriefDescription": "LSU Finished an internal operation in LD0 port."
},
{
- "EventCode": "1000E",
+ "EventCode": "0x1000E",
"EventName": "PM_MMA_ISSUED",
"BriefDescription": "MMA instructions issued."
},
{
- "EventCode": "10012",
+ "EventCode": "0x10012",
"EventName": "PM_LSU_ST0_FIN",
"BriefDescription": "LSU Finished an internal operation in ST0 port."
},
{
- "EventCode": "10014",
+ "EventCode": "0x10014",
"EventName": "PM_LSU_ST4_FIN",
"BriefDescription": "LSU Finished an internal operation in ST4 port."
},
{
- "EventCode": "10018",
+ "EventCode": "0x10018",
"EventName": "PM_IC_DEMAND_CYC",
"BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
},
{
- "EventCode": "10022",
+ "EventCode": "0x10022",
"EventName": "PM_PMC2_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
},
{
- "EventCode": "10024",
+ "EventCode": "0x10024",
"EventName": "PM_PMC5_OVERFLOW",
"BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
},
{
- "EventCode": "10058",
+ "EventCode": "0x10058",
"EventName": "PM_EXEC_STALL_FIN_AT_DISP",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU."
},
{
- "EventCode": "1005A",
+ "EventCode": "0x1005A",
"EventName": "PM_FLUSH_MPRED",
"BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction."
},
{
- "EventCode": "1C05A",
+ "EventCode": "0x1C05A",
"EventName": "PM_DERAT_MISS_2M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
- "EventCode": "10064",
- "EventName": "PM_DISP_STALL_IC_L2",
- "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ "EventCode": "0x1E05A",
+ "EventName": "PM_CMPL_STALL_LWSYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
},
{
- "EventCode": "10068",
+ "EventCode": "0x10068",
"EventName": "PM_BR_FIN",
"BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
},
{
- "EventCode": "1006A",
+ "EventCode": "0x1006A",
"EventName": "PM_FX_LSU_FIN",
"BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
},
{
- "EventCode": "1006C",
+ "EventCode": "0x1006C",
"EventName": "PM_RUN_CYC_ST_MODE",
"BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
},
{
- "EventCode": "20004",
+ "EventCode": "0x20004",
"EventName": "PM_ISSUE_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
},
{
- "EventCode": "2000A",
+ "EventCode": "0x2000A",
"EventName": "PM_HYPERVISOR_CYC",
"BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
},
{
- "EventCode": "2000E",
+ "EventCode": "0x2000E",
"EventName": "PM_LSU_LD1_FIN",
"BriefDescription": "LSU Finished an internal operation in LD1 port."
},
{
- "EventCode": "2C014",
+ "EventCode": "0x2C014",
"EventName": "PM_CMPL_STALL_SPECIAL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
},
{
- "EventCode": "2C018",
+ "EventCode": "0x2C018",
"EventName": "PM_EXEC_STALL_DMISS_L3MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
},
{
- "EventCode": "2D010",
+ "EventCode": "0x2D010",
"EventName": "PM_LSU_ST1_FIN",
"BriefDescription": "LSU Finished an internal operation in ST1 port."
},
{
- "EventCode": "2D012",
+ "EventCode": "0x2D012",
"EventName": "PM_VSU1_ISSUE",
"BriefDescription": "VSU instructions issued to VSU pipe 1."
},
{
- "EventCode": "2D018",
+ "EventCode": "0x2D018",
"EventName": "PM_EXEC_STALL_VSU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
},
{
- "EventCode": "2E01E",
+ "EventCode": "0x2D01C",
+ "EventName": "PM_CMPL_STALL_STCX",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+ },
+ {
+ "EventCode": "0x2E01E",
"EventName": "PM_EXEC_STALL_NTC_FLUSH",
- "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children."
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
},
{
- "EventCode": "2013C",
+ "EventCode": "0x2013C",
"EventName": "PM_MRK_FX_LSU_FIN",
"BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
},
{
- "EventCode": "2405A",
+ "EventCode": "0x2405A",
"EventName": "PM_NTC_FIN",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
},
{
- "EventCode": "201E2",
+ "EventCode": "0x201E2",
"EventName": "PM_MRK_LD_MISS_L1",
"BriefDescription": "Marked DL1 Demand Miss counted at finish time."
},
{
- "EventCode": "200F4",
+ "EventCode": "0x200F4",
"EventName": "PM_RUN_CYC",
"BriefDescription": "Processor cycles gated by the run latch."
},
{
- "EventCode": "30004",
- "EventName": "PM_DISP_STALL_FLUSH",
- "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
- },
- {
- "EventCode": "30008",
+ "EventCode": "0x30008",
"EventName": "PM_EXEC_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
},
{
- "EventCode": "3001A",
+ "EventCode": "0x3001A",
"EventName": "PM_LSU_ST2_FIN",
"BriefDescription": "LSU Finished an internal operation in ST2 port."
},
{
- "EventCode": "30020",
+ "EventCode": "0x30020",
"EventName": "PM_PMC2_REWIND",
"BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
},
{
- "EventCode": "30022",
+ "EventCode": "0x30022",
"EventName": "PM_PMC4_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
},
{
- "EventCode": "30024",
+ "EventCode": "0x30024",
"EventName": "PM_PMC6_OVERFLOW",
"BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
},
{
- "EventCode": "30028",
+ "EventCode": "0x30028",
"EventName": "PM_CMPL_STALL_MEM_ECC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
},
{
- "EventCode": "30036",
+ "EventCode": "0x30036",
"EventName": "PM_EXEC_STALL_SIMPLE_FX",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
},
{
- "EventCode": "3003A",
+ "EventCode": "0x3003A",
"EventName": "PM_CMPL_STALL_EXCEPTION",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
},
{
- "EventCode": "3F044",
+ "EventCode": "0x3F044",
"EventName": "PM_VSU2_ISSUE",
"BriefDescription": "VSU instructions issued to VSU pipe 2."
},
{
- "EventCode": "30058",
+ "EventCode": "0x30058",
"EventName": "PM_TLBIE_FIN",
"BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
},
{
- "EventCode": "3D058",
+ "EventCode": "0x3D058",
"EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
"BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
},
{
- "EventCode": "30066",
+ "EventCode": "0x30066",
"EventName": "PM_LSU_FIN",
"BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)."
},
{
- "EventCode": "40004",
+ "EventCode": "0x40004",
"EventName": "PM_FXU_ISSUE",
"BriefDescription": "A fixed point instruction was issued to the VSU."
},
{
- "EventCode": "40008",
+ "EventCode": "0x40008",
"EventName": "PM_NTC_ALL_FIN",
"BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
},
{
- "EventCode": "40010",
+ "EventCode": "0x40010",
"EventName": "PM_PMC3_OVERFLOW",
"BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
},
{
- "EventCode": "4C012",
+ "EventCode": "0x4C012",
"EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
},
{
- "EventCode": "4C018",
+ "EventCode": "0x4C018",
"EventName": "PM_CMPL_STALL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
},
{
- "EventCode": "4C01E",
+ "EventCode": "0x4C01E",
"EventName": "PM_LSU_ST3_FIN",
"BriefDescription": "LSU Finished an internal operation in ST3 port."
},
{
- "EventCode": "4D018",
+ "EventCode": "0x4D018",
"EventName": "PM_EXEC_STALL_BRU",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
},
{
- "EventCode": "4D01A",
+ "EventCode": "0x4D01A",
"EventName": "PM_CMPL_STALL_HWSYNC",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing."
},
{
- "EventCode": "4D01C",
+ "EventCode": "0x4D01C",
"EventName": "PM_EXEC_STALL_TLBIEL",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
},
{
- "EventCode": "4E012",
+ "EventCode": "0x4E012",
"EventName": "PM_EXEC_STALL_UNKNOWN",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
},
{
- "EventCode": "4D020",
+ "EventCode": "0x4D020",
"EventName": "PM_VSU3_ISSUE",
"BriefDescription": "VSU instruction was issued to VSU pipe 3."
},
{
- "EventCode": "40132",
+ "EventCode": "0x40132",
"EventName": "PM_MRK_LSU_FIN",
"BriefDescription": "LSU marked instruction finish."
},
{
- "EventCode": "45058",
+ "EventCode": "0x45058",
"EventName": "PM_IC_MISS_CMPL",
"BriefDescription": "Non-speculative icache miss, counted at completion."
},
{
- "EventCode": "4D050",
+ "EventCode": "0x4D050",
"EventName": "PM_VSU_NON_FLOP_CMPL",
"BriefDescription": "Non-floating point VSU instructions completed."
},
{
- "EventCode": "4D052",
+ "EventCode": "0x4D052",
"EventName": "PM_2FLOP_CMPL",
"BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
},
{
- "EventCode": "400F2",
+ "EventCode": "0x400F2",
"EventName": "PM_1PLUS_PPC_DISP",
"BriefDescription": "Cycles at least one Instr Dispatched."
},
{
- "EventCode": "400F8",
+ "EventCode": "0x400F8",
"EventName": "PM_FLUSH",
"BriefDescription": "Flush (any type)."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
index ea122a91ceb0..b5d1bd39cfb2 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -1,21 +1,21 @@
[
{
- "EventCode": "301E8",
+ "EventCode": "0x301E8",
"EventName": "PM_THRESH_EXC_64",
"BriefDescription": "Threshold counter exceeded a value of 64."
},
{
- "EventCode": "45050",
+ "EventCode": "0x45050",
"EventName": "PM_1FLOP_CMPL",
"BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
},
{
- "EventCode": "45052",
+ "EventCode": "0x45052",
"EventName": "PM_4FLOP_CMPL",
"BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
},
{
- "EventCode": "4D054",
+ "EventCode": "0x4D054",
"EventName": "PM_8FLOP_CMPL",
"BriefDescription": "Four Double Precision vector instructions completed."
}
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
index 5a714e3dd71a..db3766dca07c 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -1,56 +1,56 @@
[
{
- "EventCode": "1F15E",
+ "EventCode": "0x1F15E",
"EventName": "PM_MRK_START_PROBE_NOP_CMPL",
"BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
},
{
- "EventCode": "20016",
+ "EventCode": "0x20016",
"EventName": "PM_ST_FIN",
"BriefDescription": "Store finish count. Includes speculative activity."
},
{
- "EventCode": "20018",
+ "EventCode": "0x20018",
"EventName": "PM_ST_FWD",
"BriefDescription": "Store forwards that finished."
},
{
- "EventCode": "2011C",
+ "EventCode": "0x2011C",
"EventName": "PM_MRK_NTF_CYC",
"BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
},
{
- "EventCode": "2E01C",
+ "EventCode": "0x2E01C",
"EventName": "PM_EXEC_STALL_TLBIE",
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
},
{
- "EventCode": "201E6",
+ "EventCode": "0x201E6",
"EventName": "PM_THRESH_EXC_32",
"BriefDescription": "Threshold counter exceeded a value of 32."
},
{
- "EventCode": "200F0",
+ "EventCode": "0x200F0",
"EventName": "PM_ST_CMPL",
"BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
},
{
- "EventCode": "200FE",
+ "EventCode": "0x200FE",
"EventName": "PM_DATA_FROM_L2MISS",
"BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
},
{
- "EventCode": "30010",
+ "EventCode": "0x30010",
"EventName": "PM_PMC2_OVERFLOW",
"BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
},
{
- "EventCode": "4D010",
+ "EventCode": "0x4D010",
"EventName": "PM_PMC1_SAVED",
"BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
},
{
- "EventCode": "4D05C",
+ "EventCode": "0x4D05C",
"EventName": "PM_DPP_FLOP_CMPL",
"BriefDescription": "Double-Precision or Quad-Precision instructions completed."
}
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 7422b0ea8790..9604446f8360 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -960,7 +960,7 @@ static int get_maxfds(void)
struct rlimit rlim;
if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
- return min((int)rlim.rlim_max / 2, 512);
+ return min(rlim.rlim_max / 2, (rlim_t)512);
return 512;
}
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 7daa8bb70a5a..711d4f9f5645 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -91,6 +91,11 @@
from __future__ import print_function
import sys
+# Only change warnings if the python -W option was not used
+if not sys.warnoptions:
+ import warnings
+ # PySide2 causes deprecation warnings, ignore them.
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
import argparse
import weakref
import threading
@@ -125,8 +130,9 @@ if pyside_version_1:
from PySide.QtGui import *
from PySide.QtSql import *
-from decimal import *
-from ctypes import *
+from decimal import Decimal, ROUND_HALF_UP
+from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
+ c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
@@ -3868,7 +3874,7 @@ def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
if with_hdr:
model = indexes[0].model()
for col in range(min_col, max_col + 1):
- val = model.headerData(col, Qt.Horizontal)
+ val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
if as_csv:
text += sep + ToCSValue(val)
sep = ","
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 4a7b8deef3fd..8c10955eff93 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -16,7 +16,7 @@ pinned=0
exclusive=0
exclude_user=0
exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
exclude_idle=0
mmap=1
comm=1
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
index 76a53126efdf..d4b0ef74defc 100644
--- a/tools/perf/tests/pfm.c
+++ b/tools/perf/tests/pfm.c
@@ -131,8 +131,8 @@ static int test__pfm_group(void)
},
{
.events = "{},{instructions}",
- .nr_events = 0,
- .nr_groups = 0,
+ .nr_events = 1,
+ .nr_groups = 1,
},
{
.events = "{instructions},{instructions}",
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 22eb31e48ca7..2f9948b3d943 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -11,9 +11,9 @@ compare_number()
second_num=$2
# upper bound is first_num * 110%
- upper=$(( $first_num + $first_num / 10 ))
+ upper=$(expr $first_num + $first_num / 10 )
# lower bound is first_num * 90%
- lower=$(( $first_num - $first_num / 10 ))
+ lower=$(expr $first_num - $first_num / 10 )
if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
echo "The difference between $first_num and $second_num are greater than 10%."
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index b8fc5c53ba6f..0d8e3dcb7f88 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index ddb52f748c8e..5ed674a2f55e 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -451,10 +451,10 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
goto out;
}
- err = -1;
link = bpf_program__attach(skel->progs.on_switch);
- if (!link) {
+ if (IS_ERR(link)) {
pr_err("Failed to attach leader program\n");
+ err = PTR_ERR(link);
goto out;
}
@@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
if (evsel->bperf_leader_link_fd < 0 &&
- bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+ bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
+ err = -1;
goto out;
-
+ }
/*
* The bpf_link holds reference to the leader program, and the
* leader program holds reference to the maps. Therefore, if
@@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
/* Step 2: load the follower skeleton */
evsel->follower_skel = bperf_follower_bpf__open();
if (!evsel->follower_skel) {
+ err = -1;
pr_err("Failed to open follower skeleton\n");
goto out;
}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index b2f4920e19a6..7d2ba8419b0c 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
if ((tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) &&
die_compare_name(die_mem, fvp->name) &&
- /* Does the DIE have location information or external instance? */
+ /*
+ * Does the DIE have location information or const value
+ * or external instance?
+ */
(dwarf_attr(die_mem, DW_AT_external, &attr) ||
- dwarf_attr(die_mem, DW_AT_location, &attr)))
+ dwarf_attr(die_mem, DW_AT_location, &attr) ||
+ dwarf_attr(die_mem, DW_AT_const_value, &attr)))
return DIE_FIND_CB_END;
if (dwarf_haspc(die_mem, fvp->addr))
return DIE_FIND_CB_CONTINUE;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 9130f6fad8d5..bc5e4f294e9e 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
rb_erase(&node->rb_node, root);
+ free(node->info_linear);
free(node);
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 8a62fb39e365..19ad64f2bd83 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -100,7 +100,7 @@ enum {
PERF_IP_FLAG_VMEXIT = 1ULL << 12,
};
-#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
+#define PERF_IP_FLAG_CHARS "bcrosyiABExgh"
#define PERF_BRANCH_MASK (\
PERF_IP_FLAG_BRANCH |\
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6e5c41528c7d..6ea3e677dc1e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
if (affinity__setup(&affinity) < 0)
return;
- evlist__for_each_entry(evlist, pos)
- bpf_counter__disable(pos);
-
/* Disable 'immediate' events last */
for (imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist, i, cpu) {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4a3cd1b5bb33..a8d8463f8ee5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->auto_merge_stats = orig->auto_merge_stats;
evsel->collect_stat = orig->collect_stat;
evsel->weak_group = orig->weak_group;
+ evsel->use_config_name = orig->use_config_name;
if (evsel__copy_config_terms(evsel, orig) < 0)
goto out_err;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 75cf5dbfe208..bdad52a06438 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,8 +83,10 @@ struct evsel {
bool collect_stat;
bool weak_group;
bool bpf_counter;
+ bool use_config_name;
int bpf_fd;
struct bpf_object *bpf_obj;
+ struct list_head config_terms;
};
/*
@@ -116,10 +118,8 @@ struct evsel {
bool merged_stat;
bool reset_group;
bool errored;
- bool use_config_name;
struct hashmap *per_pkg_mask;
struct evsel *leader;
- struct list_head config_terms;
int err;
int cpu_iter;
struct {
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 8c59677bee13..20ad663978cc 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1146,6 +1146,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
decoder->state.type = INTEL_PT_TRANSACTION;
+ if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
+ decoder->state.type |= INTEL_PT_BRANCH;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
@@ -1220,8 +1222,10 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
return 0;
if (err == -EAGAIN ||
intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
+ bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
+
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- if (intel_pt_fup_event(decoder))
+ if (intel_pt_fup_event(decoder) && no_tip)
return 0;
return -EAGAIN;
}
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 8658d42ce57a..0dfec8761b9a 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -707,8 +707,10 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
*ip += intel_pt_insn->length;
- if (to_ip && *ip == to_ip)
+ if (to_ip && *ip == to_ip) {
+ intel_pt_insn->length = 0;
goto out_no_cache;
+ }
if (*ip >= al.map->end)
break;
@@ -1198,6 +1200,7 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
+ ptq->insn_len = 0;
if (ptq->state->flags & INTEL_PT_ABORT_TX) {
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
@@ -1211,7 +1214,6 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
- ptq->insn_len = 0;
} else {
if (ptq->state->from_ip)
ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 3ff4936a15a4..da19be7da284 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -776,10 +776,10 @@ static int machine__process_ksymbol_register(struct machine *machine,
if (dso) {
dso->kernel = DSO_SPACE__KERNEL;
map = map__new2(0, dso);
+ dso__put(dso);
}
if (!dso || !map) {
- dso__put(dso);
return -ENOMEM;
}
@@ -792,6 +792,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len;
maps__insert(&machine->kmaps, map);
+ map__put(map);
dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 8336dd8e8098..d3cf2dee36c8 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
return false;
}
-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
{
if (!ev1->pmu_name || !ev2->pmu_name)
- return false;
+ return true;
return !strcmp(ev1->pmu_name, ev2->pmu_name);
}
@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
*/
if (!has_constraint &&
ev->leader != metric_events[i]->leader &&
- evsel_same_pmu(ev->leader, metric_events[i]->leader))
+ evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
break;
if (!strcmp(metric_events[i]->name, ev->name)) {
set_bit(ev->idx, evlist_used);
@@ -1073,16 +1073,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
if (ret)
- return ret;
+ goto out;
ret = resolve_metric(d->metric_no_group,
d->metric_list, NULL, d->ids);
if (ret)
- return ret;
+ goto out;
*(d->has_match) = true;
- return *d->ret;
+out:
+ *(d->ret) = ret;
+ return ret;
}
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4dad14265b81..84108c17f48d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -150,6 +150,10 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
.symbol = "bpf-output",
.alias = "",
},
+ [PERF_COUNT_SW_CGROUP_SWITCHES] = {
+ .symbol = "cgroup-switches",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
@@ -2928,9 +2932,14 @@ restart:
}
for (i = 0; i < max; i++, syms++) {
+ /*
+ * New attr.config still not supported here, the latest
+ * example was PERF_COUNT_SW_CGROUP_SWITCHES
+ */
+ if (syms->symbol == NULL)
+ continue;
- if (event_glob != NULL && syms->symbol != NULL &&
- !(strglobmatch(syms->symbol, event_glob) ||
+ if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index fb8646cc3e83..923849024b15 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -347,6 +347,7 @@ emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EM
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
duration_time { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
+cgroup-switches { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CGROUP_SWITCHES); }
/*
* We have to handle the kernel PMU event cycles-ct/cycles-t/mem-loads/mem-stores separately.
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index 829af17a0867..020411682a3c 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel)
evsel->core.attr.build_id = 1;
}
+static void perf_probe_cgroup(struct evsel *evsel)
+{
+ evsel->core.attr.cgroup = 1;
+}
+
bool perf_can_sample_identifier(void)
{
return perf_probe_api(perf_probe_sample_identifier);
@@ -182,3 +187,8 @@ bool perf_can_record_build_id(void)
{
return perf_probe_api(perf_probe_build_id);
}
+
+bool perf_can_record_cgroup(void)
+{
+ return perf_probe_api(perf_probe_cgroup);
+}
diff --git a/tools/perf/util/perf_api_probe.h b/tools/perf/util/perf_api_probe.h
index f12ca55f509a..b104168efb15 100644
--- a/tools/perf/util/perf_api_probe.h
+++ b/tools/perf/util/perf_api_probe.h
@@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void);
bool perf_can_record_text_poke_events(void);
bool perf_can_sample_identifier(void);
bool perf_can_record_build_id(void);
+bool perf_can_record_cgroup(void);
#endif // __PERF_API_PROBE_H
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index d735acb6c29c..6eef6dfeaa57 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -62,8 +62,16 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
}
/* no event */
- if (*q == '\0')
+ if (*q == '\0') {
+ if (*sep == '}') {
+ if (grp_evt < 0) {
+ ui__error("cannot close a non-existing event group\n");
+ goto error;
+ }
+ grp_evt--;
+ }
continue;
+ }
memset(&attr, 0, sizeof(attr));
event_attr_init(&attr);
@@ -107,6 +115,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
grp_evt = -1;
}
}
+ free(p_orig);
return 0;
error:
free(p_orig);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 866f2d514d72..b029c29ce227 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
immediate_value_is_supported()) {
Dwarf_Sword snum;
+ if (!tvar)
+ return 0;
+
dwarf_formsdata(&attr, &snum);
ret = asprintf(&tvar->value, "\\%ld", (long)snum);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 106b3d60881a..e59242c361ce 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1723,6 +1723,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (event->header.size < hdr_sz || event->header.size > buf_sz)
return -1;
+ buf += hdr_sz;
rest = event->header.size - hdr_sz;
if (readn(fd, buf, rest) != (ssize_t)rest)
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index a76fff5e7d83..ca326f98c7a2 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -541,7 +541,7 @@ static void uniquify_event_name(struct evsel *counter)
char *config;
int ret = 0;
- if (counter->uniquified_name ||
+ if (counter->uniquified_name || counter->use_config_name ||
!counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
strlen(counter->pmu_name)))
return;
@@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter)
}
} else {
if (perf_pmu__has_hybrid()) {
- if (!counter->use_config_name) {
- ret = asprintf(&new_name, "%s/%s/",
- counter->pmu_name, counter->name);
- }
+ ret = asprintf(&new_name, "%s/%s/",
+ counter->pmu_name, counter->name);
} else {
ret = asprintf(&new_name, "%s [%s]",
counter->name, counter->pmu_name);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 4c56aa837434..a73345730ba9 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes)
list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
list_del_init(&pos->note_list);
+ zfree(&pos->args);
zfree(&pos->name);
zfree(&pos->provider);
free(pos);
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index f9271f3ea912..071312f5eb92 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -131,29 +131,29 @@ QUIET_SUBDIR1 =
ifneq ($(silent),1)
ifneq ($(V),1)
- QUIET_CC = @echo ' CC '$@;
- QUIET_CC_FPIC = @echo ' CC FPIC '$@;
- QUIET_CLANG = @echo ' CLANG '$@;
- QUIET_AR = @echo ' AR '$@;
- QUIET_LINK = @echo ' LINK '$@;
- QUIET_MKDIR = @echo ' MKDIR '$@;
- QUIET_GEN = @echo ' GEN '$@;
+ QUIET_CC = @echo ' CC '$@;
+ QUIET_CC_FPIC = @echo ' CC FPIC '$@;
+ QUIET_CLANG = @echo ' CLANG '$@;
+ QUIET_AR = @echo ' AR '$@;
+ QUIET_LINK = @echo ' LINK '$@;
+ QUIET_MKDIR = @echo ' MKDIR '$@;
+ QUIET_GEN = @echo ' GEN '$@;
QUIET_SUBDIR0 = +@subdir=
QUIET_SUBDIR1 = ;$(NO_SUBDIR) \
- echo ' SUBDIR '$$subdir; \
+ echo ' SUBDIR '$$subdir; \
$(MAKE) $(PRINT_DIR) -C $$subdir
- QUIET_FLEX = @echo ' FLEX '$@;
- QUIET_BISON = @echo ' BISON '$@;
- QUIET_GENSKEL = @echo ' GEN-SKEL '$@;
+ QUIET_FLEX = @echo ' FLEX '$@;
+ QUIET_BISON = @echo ' BISON '$@;
+ QUIET_GENSKEL = @echo ' GENSKEL '$@;
descend = \
- +@echo ' DESCEND '$(1); \
+ +@echo ' DESCEND '$(1); \
mkdir -p $(OUTPUT)$(1) && \
$(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
- QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
- QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
- QUIET_UNINST = @printf ' UNINST %s\n' $1;
+ QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
+ QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
+ QUIET_UNINST = @printf ' UNINST %s\n' $1;
endif
endif
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c62d372d426f..ed563bdd88f3 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -62,7 +62,7 @@ struct nfit_test_resource *get_nfit_res(resource_size_t resource)
}
EXPORT_SYMBOL(get_nfit_res);
-void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
+static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
{
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 9b185bf82da8..54f367cbadae 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1871,9 +1871,16 @@ static void smart_init(struct nfit_test *t)
}
}
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
+{
+ /* until spa location cookie support is added... */
+ return sizeof(*spa) - 8;
+}
+
static int nfit_test0_alloc(struct nfit_test *t)
{
- size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
+ struct acpi_nfit_system_address *spa = NULL;
+ size_t nfit_size = sizeof_spa(spa) * NUM_SPA
+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
+ offsetof(struct acpi_nfit_control_region,
@@ -1937,7 +1944,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
static int nfit_test1_alloc(struct nfit_test *t)
{
- size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
+ struct acpi_nfit_system_address *spa = NULL;
+ size_t nfit_size = sizeof_spa(spa) * 2
+ sizeof(struct acpi_nfit_memory_map) * 2
+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
int i;
@@ -2000,7 +2008,7 @@ static void nfit_test0_setup(struct nfit_test *t)
*/
spa = nfit_buf;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 0+1;
spa->address = t->spa_set_dma[0];
@@ -2014,7 +2022,7 @@ static void nfit_test0_setup(struct nfit_test *t)
*/
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 1+1;
spa->address = t->spa_set_dma[1];
@@ -2024,7 +2032,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa2 (dcr0) dimm0 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 2+1;
spa->address = t->dcr_dma[0];
@@ -2034,7 +2042,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa3 (dcr1) dimm1 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 3+1;
spa->address = t->dcr_dma[1];
@@ -2044,7 +2052,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa4 (dcr2) dimm2 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 4+1;
spa->address = t->dcr_dma[2];
@@ -2054,7 +2062,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa5 (dcr3) dimm3 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 5+1;
spa->address = t->dcr_dma[3];
@@ -2064,7 +2072,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa6 (bdw for dcr0) dimm0 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 6+1;
spa->address = t->dimm_dma[0];
@@ -2074,7 +2082,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa7 (bdw for dcr1) dimm1 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 7+1;
spa->address = t->dimm_dma[1];
@@ -2084,7 +2092,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa8 (bdw for dcr2) dimm2 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 8+1;
spa->address = t->dimm_dma[2];
@@ -2094,7 +2102,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa9 (bdw for dcr3) dimm3 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 9+1;
spa->address = t->dimm_dma[3];
@@ -2581,7 +2589,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa10 (dcr4) dimm4 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
spa->range_index = 10+1;
spa->address = t->dcr_dma[4];
@@ -2595,7 +2603,7 @@ static void nfit_test0_setup(struct nfit_test *t)
*/
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 11+1;
spa->address = t->spa_set_dma[2];
@@ -2605,7 +2613,7 @@ static void nfit_test0_setup(struct nfit_test *t)
/* spa12 (bdw for dcr4) dimm4 */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
spa->range_index = 12+1;
spa->address = t->dimm_dma[4];
@@ -2739,7 +2747,7 @@ static void nfit_test1_setup(struct nfit_test *t)
/* spa0 (flat range with no bdw aliasing) */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
spa->range_index = 0+1;
spa->address = t->spa_set_dma[0];
@@ -2749,7 +2757,7 @@ static void nfit_test1_setup(struct nfit_test *t)
/* virtual cd region */
spa = nfit_buf + offset;
spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
- spa->header.length = sizeof(*spa);
+ spa->header.length = sizeof_spa(spa);
memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
spa->range_index = 0;
spa->address = t->spa_set_dma[1];
diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
index 656b04976ccc..67b77ab83c20 100644
--- a/tools/testing/selftests/arm64/bti/test.c
+++ b/tools/testing/selftests/arm64/bti/test.c
@@ -6,6 +6,7 @@
#include "system.h"
+#include <stddef.h>
#include <linux/errno.h>
#include <linux/auxvec.h>
#include <linux/signal.h>
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 12ee40284da0..2060bc122c53 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -40,7 +40,7 @@ struct ipv6_packet pkt_v6 = {
.tcp.doff = 5,
};
-static int settimeo(int fd, int timeout_ms)
+int settimeo(int fd, int timeout_ms)
{
struct timeval timeout = { .tv_sec = 3 };
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index 7205f8afdba1..5e0d51c07b63 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -33,6 +33,7 @@ struct ipv6_packet {
} __packed;
extern struct ipv6_packet pkt_v6;
+int settimeo(int fd, int timeout_ms);
int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
int connect_to_fd(int server_fd, int timeout_ms);
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
index de78617f6550..f9a8ae331963 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c
@@ -86,8 +86,9 @@ void test_ringbuf(void)
const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
pthread_t thread;
long bg_ret = -1;
- int err, cnt;
+ int err, cnt, rb_fd;
int page_size = getpagesize();
+ void *mmap_ptr, *tmp_ptr;
skel = test_ringbuf__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
@@ -101,6 +102,52 @@ void test_ringbuf(void)
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
goto cleanup;
+ rb_fd = bpf_map__fd(skel->maps.ringbuf);
+ /* good read/write cons_pos */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+ ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
+ tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
+ if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
+ goto cleanup;
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+ /* bad writeable prod_pos */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
+ err = -errno;
+ ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
+ ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");
+
+ /* bad writeable data pages */
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ err = -errno;
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
+ ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
+ mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
+ mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+ ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");
+
+ /* good read-only pages */
+ mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
+ if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+ goto cleanup;
+
+ ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
+ ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");
+
+ /* good read-only pages with initial offset */
+ mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
+ if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+ goto cleanup;
+
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
+ ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
+ ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
+ ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
new file mode 100644
index 000000000000..5703c918812b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
+ * between src and dst. The netns fwd has veth links to each src and dst. The
+ * client is in src and server in dst. The test installs a TC BPF program to each
+ * host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
+ * neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
+ * switch from ingress side; it also installs a checker prog on the egress side
+ * to drop unexpected traffic.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <linux/limits.h>
+#include <linux/sysctl.h>
+#include <linux/if_tun.h>
+#include <linux/if.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_neigh_fib.skel.h"
+#include "test_tc_neigh.skel.h"
+#include "test_tc_peer.skel.h"
+
+#define NS_SRC "ns_src"
+#define NS_FWD "ns_fwd"
+#define NS_DST "ns_dst"
+
+#define IP4_SRC "172.16.1.100"
+#define IP4_DST "172.16.2.100"
+#define IP4_TUN_SRC "172.17.1.100"
+#define IP4_TUN_FWD "172.17.1.200"
+#define IP4_PORT 9004
+
+#define IP6_SRC "0::1:dead:beef:cafe"
+#define IP6_DST "0::2:dead:beef:cafe"
+#define IP6_TUN_SRC "1::1:dead:beef:cafe"
+#define IP6_TUN_FWD "1::2:dead:beef:cafe"
+#define IP6_PORT 9006
+
+#define IP4_SLL "169.254.0.1"
+#define IP4_DLL "169.254.0.2"
+#define IP4_NET "169.254.0.0"
+
+#define MAC_DST_FWD "00:11:22:33:44:55"
+#define MAC_DST "00:22:33:44:55:66"
+
+#define IFADDR_STR_LEN 18
+#define PING_ARGS "-i 0.2 -c 3 -w 10 -q"
+
+#define SRC_PROG_PIN_FILE "/sys/fs/bpf/test_tc_src"
+#define DST_PROG_PIN_FILE "/sys/fs/bpf/test_tc_dst"
+#define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk"
+
+#define TIMEOUT_MILLIS 10000
+
+#define log_err(MSG, ...) \
+ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
+
+static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL};
+
+static int write_file(const char *path, const char *newval)
+{
+ FILE *f;
+
+ f = fopen(path, "r+");
+ if (!f)
+ return -1;
+ if (fwrite(newval, strlen(newval), 1, f) != 1) {
+ log_err("writing to %s failed", path);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+struct nstoken {
+ int orig_netns_fd;
+};
+
+static int setns_by_fd(int nsfd)
+{
+ int err;
+
+ err = setns(nsfd, CLONE_NEWNET);
+ close(nsfd);
+
+ if (!ASSERT_OK(err, "setns"))
+ return err;
+
+ /* Switch /sys to the new namespace so that e.g. /sys/class/net
+ * reflects the devices in the new namespace.
+ */
+ err = unshare(CLONE_NEWNS);
+ if (!ASSERT_OK(err, "unshare"))
+ return err;
+
+ err = umount2("/sys", MNT_DETACH);
+ if (!ASSERT_OK(err, "umount2 /sys"))
+ return err;
+
+ err = mount("sysfs", "/sys", "sysfs", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys"))
+ return err;
+
+ err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
+ if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
+ return err;
+
+ return 0;
+}
+
+/**
+ * open_netns() - Switch to specified network namespace by name.
+ *
+ * Returns token with which to restore the original namespace
+ * using close_netns().
+ */
+static struct nstoken *open_netns(const char *name)
+{
+ int nsfd;
+ char nspath[PATH_MAX];
+ int err;
+ struct nstoken *token;
+
+ token = malloc(sizeof(struct nstoken));
+ if (!ASSERT_OK_PTR(token, "malloc token"))
+ return NULL;
+
+ token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
+ goto fail;
+
+ snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
+ nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
+ if (!ASSERT_GE(nsfd, 0, "open netns fd"))
+ goto fail;
+
+ err = setns_by_fd(nsfd);
+ if (!ASSERT_OK(err, "setns_by_fd"))
+ goto fail;
+
+ return token;
+fail:
+ free(token);
+ return NULL;
+}
+
+static void close_netns(struct nstoken *token)
+{
+ ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
+ free(token);
+}
+
+static int netns_setup_namespaces(const char *verb)
+{
+ const char * const *ns = namespaces;
+ char cmd[128];
+
+ while (*ns) {
+ snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns);
+ if (!ASSERT_OK(system(cmd), cmd))
+ return -1;
+ ns++;
+ }
+ return 0;
+}
+
+struct netns_setup_result {
+ int ifindex_veth_src_fwd;
+ int ifindex_veth_dst_fwd;
+};
+
+static int get_ifaddr(const char *name, char *ifaddr)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name);
+ f = fopen(path, "r");
+ if (!ASSERT_OK_PTR(f, path))
+ return -1;
+
+ ret = fread(ifaddr, 1, IFADDR_STR_LEN, f);
+ if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+static int get_ifindex(const char *name)
+{
+ char path[PATH_MAX];
+ char buf[32];
+ FILE *f;
+ int ret;
+
+ snprintf(path, PATH_MAX, "/sys/class/net/%s/ifindex", name);
+ f = fopen(path, "r");
+ if (!ASSERT_OK_PTR(f, path))
+ return -1;
+
+ ret = fread(buf, 1, sizeof(buf), f);
+ if (!ASSERT_GT(ret, 0, "fread ifindex")) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return atoi(buf);
+}
+
+#define SYS(fmt, ...) \
+ ({ \
+ char cmd[1024]; \
+ snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (!ASSERT_OK(system(cmd), cmd)) \
+ goto fail; \
+ })
+
+static int netns_setup_links_and_routes(struct netns_setup_result *result)
+{
+ struct nstoken *nstoken = NULL;
+ char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
+
+ SYS("ip link add veth_src type veth peer name veth_src_fwd");
+ SYS("ip link add veth_dst type veth peer name veth_dst_fwd");
+
+ SYS("ip link set veth_dst_fwd address " MAC_DST_FWD);
+ SYS("ip link set veth_dst address " MAC_DST);
+
+ if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
+ goto fail;
+
+ result->ifindex_veth_src_fwd = get_ifindex("veth_src_fwd");
+ if (result->ifindex_veth_src_fwd < 0)
+ goto fail;
+ result->ifindex_veth_dst_fwd = get_ifindex("veth_dst_fwd");
+ if (result->ifindex_veth_dst_fwd < 0)
+ goto fail;
+
+ SYS("ip link set veth_src netns " NS_SRC);
+ SYS("ip link set veth_src_fwd netns " NS_FWD);
+ SYS("ip link set veth_dst_fwd netns " NS_FWD);
+ SYS("ip link set veth_dst netns " NS_DST);
+
+ /** setup in 'src' namespace */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto fail;
+
+ SYS("ip addr add " IP4_SRC "/32 dev veth_src");
+ SYS("ip addr add " IP6_SRC "/128 dev veth_src nodad");
+ SYS("ip link set dev veth_src up");
+
+ SYS("ip route add " IP4_DST "/32 dev veth_src scope global");
+ SYS("ip route add " IP4_NET "/16 dev veth_src scope global");
+ SYS("ip route add " IP6_DST "/128 dev veth_src scope global");
+
+ SYS("ip neigh add " IP4_DST " dev veth_src lladdr %s",
+ veth_src_fwd_addr);
+ SYS("ip neigh add " IP6_DST " dev veth_src lladdr %s",
+ veth_src_fwd_addr);
+
+ close_netns(nstoken);
+
+ /** setup in 'fwd' namespace */
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ goto fail;
+
+ /* The fwd netns automatically gets a v6 LL address / routes, but also
+ * needs v4 one in order to start ARP probing. IP4_NET route is added
+ * to the endpoints so that the ARP processing will reply.
+ */
+ SYS("ip addr add " IP4_SLL "/32 dev veth_src_fwd");
+ SYS("ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
+ SYS("ip link set dev veth_src_fwd up");
+ SYS("ip link set dev veth_dst_fwd up");
+
+ SYS("ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
+ SYS("ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
+ SYS("ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
+ SYS("ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
+
+ close_netns(nstoken);
+
+ /** setup in 'dst' namespace */
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ goto fail;
+
+ SYS("ip addr add " IP4_DST "/32 dev veth_dst");
+ SYS("ip addr add " IP6_DST "/128 dev veth_dst nodad");
+ SYS("ip link set dev veth_dst up");
+
+ SYS("ip route add " IP4_SRC "/32 dev veth_dst scope global");
+ SYS("ip route add " IP4_NET "/16 dev veth_dst scope global");
+ SYS("ip route add " IP6_SRC "/128 dev veth_dst scope global");
+
+ SYS("ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+ SYS("ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+
+ close_netns(nstoken);
+
+ return 0;
+fail:
+ if (nstoken)
+ close_netns(nstoken);
+ return -1;
+}
+
+static int netns_load_bpf(void)
+{
+ SYS("tc qdisc add dev veth_src_fwd clsact");
+ SYS("tc filter add dev veth_src_fwd ingress bpf da object-pinned "
+ SRC_PROG_PIN_FILE);
+ SYS("tc filter add dev veth_src_fwd egress bpf da object-pinned "
+ CHK_PROG_PIN_FILE);
+
+ SYS("tc qdisc add dev veth_dst_fwd clsact");
+ SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned "
+ DST_PROG_PIN_FILE);
+ SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned "
+ CHK_PROG_PIN_FILE);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void test_tcp(int family, const char *addr, __u16 port)
+{
+ int listen_fd = -1, accept_fd = -1, client_fd = -1;
+ char buf[] = "testing testing";
+ int n;
+ struct nstoken *nstoken;
+
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ return;
+
+ listen_fd = start_server(family, SOCK_STREAM, addr, port, 0);
+ if (!ASSERT_GE(listen_fd, 0, "listen"))
+ goto done;
+
+ close_netns(nstoken);
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+
+ client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto done;
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ if (!ASSERT_GE(accept_fd, 0, "accept"))
+ goto done;
+
+ if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo"))
+ goto done;
+
+ n = write(client_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+
+ n = read(accept_fd, buf, sizeof(buf));
+ ASSERT_EQ(n, sizeof(buf), "recv from server");
+
+done:
+ if (nstoken)
+ close_netns(nstoken);
+ if (listen_fd >= 0)
+ close(listen_fd);
+ if (accept_fd >= 0)
+ close(accept_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+}
+
+static int test_ping(int family, const char *addr)
+{
+ const char *ping = family == AF_INET6 ? "ping6" : "ping";
+
+ SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping, addr);
+ return 0;
+fail:
+ return -1;
+}
+
+static void test_connectivity(void)
+{
+ test_tcp(AF_INET, IP4_DST, IP4_PORT);
+ test_ping(AF_INET, IP4_DST);
+ test_tcp(AF_INET6, IP6_DST, IP6_PORT);
+ test_ping(AF_INET6, IP6_DST);
+}
+
+static int set_forwarding(bool enable)
+{
+ int err;
+
+ err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? "1" : "0");
+ if (!ASSERT_OK(err, "set ipv4.ip_forward=0"))
+ return err;
+
+ err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0");
+ if (!ASSERT_OK(err, "set ipv6.forwarding=0"))
+ return err;
+
+ return 0;
+}
+
+static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken = NULL;
+ struct test_tc_neigh_fib *skel = NULL;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_neigh_fib__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open"))
+ goto done;
+
+ if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load"))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto done;
+
+ if (netns_load_bpf())
+ goto done;
+
+ /* bpf_fib_lookup() checks if forwarding is enabled */
+ if (!ASSERT_OK(set_forwarding(true), "enable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_neigh_fib__destroy(skel);
+ close_netns(nstoken);
+}
+
+static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken = NULL;
+ struct test_tc_neigh *skel = NULL;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_neigh__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+ err = test_tc_neigh__load(skel);
+ if (!ASSERT_OK(err, "test_tc_neigh__load"))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto done;
+
+ if (netns_load_bpf())
+ goto done;
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_neigh__destroy(skel);
+ close_netns(nstoken);
+}
+
+static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
+{
+ struct nstoken *nstoken;
+ struct test_tc_peer *skel;
+ int err;
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+ return;
+
+ skel = test_tc_peer__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto done;
+
+ skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto done;
+
+ err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto done;
+
+ if (netns_load_bpf())
+ goto done;
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto done;
+
+ test_connectivity();
+
+done:
+ if (skel)
+ test_tc_peer__destroy(skel);
+ close_netns(nstoken);
+}
+
+static int tun_open(char *name)
+{
+ struct ifreq ifr;
+ int fd, err;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (!ASSERT_GE(fd, 0, "open /dev/net/tun"))
+ return -1;
+
+ memset(&ifr, 0, sizeof(ifr));
+
+ ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
+ if (*name)
+ strncpy(ifr.ifr_name, name, IFNAMSIZ);
+
+ err = ioctl(fd, TUNSETIFF, &ifr);
+ if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
+ goto fail;
+
+ SYS("ip link set dev %s up", name);
+
+ return fd;
+fail:
+ close(fd);
+ return -1;
+}
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+enum {
+ SRC_TO_TARGET = 0,
+ TARGET_TO_SRC = 1,
+};
+
+static int tun_relay_loop(int src_fd, int target_fd)
+{
+ fd_set rfds, wfds;
+
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+
+ for (;;) {
+ char buf[1500];
+ int direction, nread, nwrite;
+
+ FD_SET(src_fd, &rfds);
+ FD_SET(target_fd, &rfds);
+
+ if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) {
+ log_err("select failed");
+ return 1;
+ }
+
+ direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC;
+
+ nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf));
+ if (nread < 0) {
+ log_err("read failed");
+ return 1;
+ }
+
+ nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread);
+ if (nwrite != nread) {
+ log_err("write failed");
+ return 1;
+ }
+ }
+}
+
+static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+{
+ struct test_tc_peer *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ int err;
+ int tunnel_pid = -1;
+ int src_fd, target_fd;
+ int ifindex;
+
+ /* Start a L3 TUN/TAP tunnel between the src and dst namespaces.
+ * This test is using TUN/TAP instead of e.g. IPIP or GRE tunnel as those
+ * expose the L2 headers encapsulating the IP packet to BPF and hence
+ * don't have skb in suitable state for this test. Alternative to TUN/TAP
+ * would be e.g. Wireguard which would appear as a pure L3 device to BPF,
+ * but that requires much more complicated setup.
+ */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
+ return;
+
+ src_fd = tun_open("tun_src");
+ if (!ASSERT_GE(src_fd, 0, "tun_open tun_src"))
+ goto fail;
+
+ close_netns(nstoken);
+
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
+ goto fail;
+
+ target_fd = tun_open("tun_fwd");
+ if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd"))
+ goto fail;
+
+ tunnel_pid = fork();
+ if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop"))
+ goto fail;
+
+ if (tunnel_pid == 0)
+ exit(tun_relay_loop(src_fd, target_fd));
+
+ skel = test_tc_peer__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto fail;
+
+ ifindex = get_ifindex("tun_fwd");
+ if (!ASSERT_GE(ifindex, 0, "get_ifindex tun_fwd"))
+ goto fail;
+
+ skel->rodata->IFINDEX_SRC = ifindex;
+ skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+ goto fail;
+
+ err = bpf_program__pin(skel->progs.tc_src_l3, SRC_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+ goto fail;
+
+ err = bpf_program__pin(skel->progs.tc_dst_l3, DST_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+ goto fail;
+
+ err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+ if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+ goto fail;
+
+ /* Load "tc_src_l3" to the tun_fwd interface to redirect packets
+ * towards dst, and "tc_dst" to redirect packets
+ * and "tc_chk" on veth_dst_fwd to drop non-redirected packets.
+ */
+ SYS("tc qdisc add dev tun_fwd clsact");
+ SYS("tc filter add dev tun_fwd ingress bpf da object-pinned "
+ SRC_PROG_PIN_FILE);
+
+ SYS("tc qdisc add dev veth_dst_fwd clsact");
+ SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned "
+ DST_PROG_PIN_FILE);
+ SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned "
+ CHK_PROG_PIN_FILE);
+
+ /* Setup route and neigh tables */
+ SYS("ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
+ SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24");
+
+ SYS("ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
+ SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
+
+ SYS("ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
+ SYS("ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
+ " dev tun_src scope global");
+ SYS("ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
+ SYS("ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
+ SYS("ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
+ " dev tun_src scope global");
+ SYS("ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
+
+ SYS("ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+ SYS("ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto fail;
+
+ test_connectivity();
+
+fail:
+ if (tunnel_pid > 0) {
+ kill(tunnel_pid, SIGTERM);
+ waitpid(tunnel_pid, NULL, 0);
+ }
+ if (src_fd >= 0)
+ close(src_fd);
+ if (target_fd >= 0)
+ close(target_fd);
+ if (skel)
+ test_tc_peer__destroy(skel);
+ if (nstoken)
+ close_netns(nstoken);
+}
+
+#define RUN_TEST(name) \
+ ({ \
+ struct netns_setup_result setup_result; \
+ if (test__start_subtest(#name)) \
+ if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
+ if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
+ "setup links and routes")) \
+ test_ ## name(&setup_result); \
+ netns_setup_namespaces("delete"); \
+ } \
+ })
+
+static void *test_tc_redirect_run_tests(void *arg)
+{
+ RUN_TEST(tc_redirect_peer);
+ RUN_TEST(tc_redirect_peer_l3);
+ RUN_TEST(tc_redirect_neigh);
+ RUN_TEST(tc_redirect_neigh_fib);
+ return NULL;
+}
+
+void test_tc_redirect(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
index b985ac4e7a81..0c93d326a663 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -33,17 +33,8 @@
a.s6_addr32[3] == b.s6_addr32[3])
#endif
-enum {
- dev_src,
- dev_dst,
-};
-
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
__be32 addr)
@@ -79,14 +70,8 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
return v6_equal(ip6h->daddr, addr);
}
-static __always_inline int get_dev_ifindex(int which)
-{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
-
- return ifindex ? *ifindex : 0;
-}
-
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -98,7 +83,8 @@ SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -119,10 +105,11 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
{
__u8 zero[ETH_ALEN * 2];
bool redirect = false;
@@ -143,7 +130,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
return TC_ACT_SHOT;
- return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0);
+ return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
}
char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
index d82ed3457030..f7ab69cf018e 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
@@ -75,7 +75,8 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
return 0;
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
{
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
@@ -142,12 +143,14 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
/* these are identical, but keep them separate for compatibility with the
* section names expected by test_tc_redirect.sh
*/
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
{
return tc_redir(skb);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
{
return tc_redir(skb);
}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
index fc84a7685aa2..fe818cd5f010 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_peer.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -5,41 +5,59 @@
#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
-enum {
- dev_src,
- dev_dst,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
-struct bpf_map_def SEC("maps") ifindex_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 2,
-};
+static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
-static __always_inline int get_dev_ifindex(int which)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
{
- int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+ return TC_ACT_SHOT;
+}
- return ifindex ? *ifindex : 0;
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
+{
+ return bpf_redirect_peer(IFINDEX_SRC, 0);
}
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
{
- return TC_ACT_SHOT;
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress_l3")
+int tc_dst_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_src), 0);
+ return bpf_redirect(IFINDEX_SRC, 0);
}
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress_l3")
+int tc_src_l3(struct __sk_buff *skb)
{
- return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0);
+ __u16 proto = skb->protocol;
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0)
+ return TC_ACT_SHOT;
+
+ if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0)
+ return TC_ACT_SHOT;
+
+ return bpf_redirect_peer(IFINDEX_DST, 0);
}
char __license[] SEC("license") = "GPL";
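
With the sections renamed to "classifier/...", attachment moves from the deleted shell script into C. A rough sketch of attaching one of these programs to a device's ingress hook via libbpf's TC API; treat the exact flow as illustrative rather than as the selftest's own code:

/* Sketch: attach an already-loaded tc classifier to ingress using libbpf.
 * prog_fd is the fd of e.g. the "classifier/src_ingress" program. */
#include <errno.h>
#include <net/if.h>
#include <bpf/libbpf.h>

int attach_ingress(const char *dev, int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
			    .ifindex = if_nametoindex(dev),
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
	int err;

	/* Creates the clsact qdisc; tolerate it already existing. */
	err = bpf_tc_hook_create(&hook);
	if (err && err != -EEXIST)
		return err;

	return bpf_tc_attach(&hook, &opts);
}
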
diff --git a/tools/testing/selftests/bpf/test_tc_redirect.sh b/tools/testing/selftests/bpf/test_tc_redirect.sh
deleted file mode 100755
index 8868aa1ca902..000000000000
--- a/tools/testing/selftests/bpf/test_tc_redirect.sh
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
-# between src and dst. The netns fwd has veth links to each src and dst. The
-# client is in src and server in dst. The test installs a TC BPF program to each
-# host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
-# neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
-# switch from ingress side; it also installs a checker prog on the egress side
-# to drop unexpected traffic.
-
-if [[ $EUID -ne 0 ]]; then
- echo "This script must be run as root"
- echo "FAIL"
- exit 1
-fi
-
-# check that needed tools are present
-command -v nc >/dev/null 2>&1 || \
- { echo >&2 "nc is not available"; exit 1; }
-command -v dd >/dev/null 2>&1 || \
- { echo >&2 "dd is not available"; exit 1; }
-command -v timeout >/dev/null 2>&1 || \
- { echo >&2 "timeout is not available"; exit 1; }
-command -v ping >/dev/null 2>&1 || \
- { echo >&2 "ping is not available"; exit 1; }
-if command -v ping6 >/dev/null 2>&1; then PING6=ping6; else PING6=ping; fi
-command -v perl >/dev/null 2>&1 || \
- { echo >&2 "perl is not available"; exit 1; }
-command -v jq >/dev/null 2>&1 || \
- { echo >&2 "jq is not available"; exit 1; }
-command -v bpftool >/dev/null 2>&1 || \
- { echo >&2 "bpftool is not available"; exit 1; }
-
-readonly GREEN='\033[0;92m'
-readonly RED='\033[0;31m'
-readonly NC='\033[0m' # No Color
-
-readonly PING_ARG="-c 3 -w 10 -q"
-
-readonly TIMEOUT=10
-
-readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
-readonly NS_FWD="ns-fwd-$(mktemp -u XXXXXX)"
-readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
-
-readonly IP4_SRC="172.16.1.100"
-readonly IP4_DST="172.16.2.100"
-
-readonly IP6_SRC="::1:dead:beef:cafe"
-readonly IP6_DST="::2:dead:beef:cafe"
-
-readonly IP4_SLL="169.254.0.1"
-readonly IP4_DLL="169.254.0.2"
-readonly IP4_NET="169.254.0.0"
-
-netns_cleanup()
-{
- ip netns del ${NS_SRC}
- ip netns del ${NS_FWD}
- ip netns del ${NS_DST}
-}
-
-netns_setup()
-{
- ip netns add "${NS_SRC}"
- ip netns add "${NS_FWD}"
- ip netns add "${NS_DST}"
-
- ip link add veth_src type veth peer name veth_src_fwd
- ip link add veth_dst type veth peer name veth_dst_fwd
-
- ip link set veth_src netns ${NS_SRC}
- ip link set veth_src_fwd netns ${NS_FWD}
-
- ip link set veth_dst netns ${NS_DST}
- ip link set veth_dst_fwd netns ${NS_FWD}
-
- ip -netns ${NS_SRC} addr add ${IP4_SRC}/32 dev veth_src
- ip -netns ${NS_DST} addr add ${IP4_DST}/32 dev veth_dst
-
- # The fwd netns automatically get a v6 LL address / routes, but also
- # needs v4 one in order to start ARP probing. IP4_NET route is added
- # to the endpoints so that the ARP processing will reply.
-
- ip -netns ${NS_FWD} addr add ${IP4_SLL}/32 dev veth_src_fwd
- ip -netns ${NS_FWD} addr add ${IP4_DLL}/32 dev veth_dst_fwd
-
- ip -netns ${NS_SRC} addr add ${IP6_SRC}/128 dev veth_src nodad
- ip -netns ${NS_DST} addr add ${IP6_DST}/128 dev veth_dst nodad
-
- ip -netns ${NS_SRC} link set dev veth_src up
- ip -netns ${NS_FWD} link set dev veth_src_fwd up
-
- ip -netns ${NS_DST} link set dev veth_dst up
- ip -netns ${NS_FWD} link set dev veth_dst_fwd up
-
- ip -netns ${NS_SRC} route add ${IP4_DST}/32 dev veth_src scope global
- ip -netns ${NS_SRC} route add ${IP4_NET}/16 dev veth_src scope global
- ip -netns ${NS_FWD} route add ${IP4_SRC}/32 dev veth_src_fwd scope global
-
- ip -netns ${NS_SRC} route add ${IP6_DST}/128 dev veth_src scope global
- ip -netns ${NS_FWD} route add ${IP6_SRC}/128 dev veth_src_fwd scope global
-
- ip -netns ${NS_DST} route add ${IP4_SRC}/32 dev veth_dst scope global
- ip -netns ${NS_DST} route add ${IP4_NET}/16 dev veth_dst scope global
- ip -netns ${NS_FWD} route add ${IP4_DST}/32 dev veth_dst_fwd scope global
-
- ip -netns ${NS_DST} route add ${IP6_SRC}/128 dev veth_dst scope global
- ip -netns ${NS_FWD} route add ${IP6_DST}/128 dev veth_dst_fwd scope global
-
- fmac_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/address)
- fmac_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/address)
-
- ip -netns ${NS_SRC} neigh add ${IP4_DST} dev veth_src lladdr $fmac_src
- ip -netns ${NS_DST} neigh add ${IP4_SRC} dev veth_dst lladdr $fmac_dst
-
- ip -netns ${NS_SRC} neigh add ${IP6_DST} dev veth_src lladdr $fmac_src
- ip -netns ${NS_DST} neigh add ${IP6_SRC} dev veth_dst lladdr $fmac_dst
-}
-
-netns_test_connectivity()
-{
- set +e
-
- ip netns exec ${NS_DST} bash -c "nc -4 -l -p 9004 &"
- ip netns exec ${NS_DST} bash -c "nc -6 -l -p 9006 &"
-
- TEST="TCPv4 connectivity test"
- ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP4_DST}/9004"
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- TEST="TCPv6 connectivity test"
- ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP6_DST}/9006"
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- TEST="ICMPv4 connectivity test"
- ip netns exec ${NS_SRC} ping $PING_ARG ${IP4_DST}
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- TEST="ICMPv6 connectivity test"
- ip netns exec ${NS_SRC} $PING6 $PING_ARG ${IP6_DST}
- if [ $? -ne 0 ]; then
- echo -e "${TEST}: ${RED}FAIL${NC}"
- exit 1
- fi
- echo -e "${TEST}: ${GREEN}PASS${NC}"
-
- set -e
-}
-
-hex_mem_str()
-{
- perl -e 'print join(" ", unpack("(H2)8", pack("L", @ARGV)))' $1
-}
-
-netns_setup_bpf()
-{
- local obj=$1
- local use_forwarding=${2:-0}
-
- ip netns exec ${NS_FWD} tc qdisc add dev veth_src_fwd clsact
- ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd ingress bpf da obj $obj sec src_ingress
- ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd egress bpf da obj $obj sec chk_egress
-
- ip netns exec ${NS_FWD} tc qdisc add dev veth_dst_fwd clsact
- ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd ingress bpf da obj $obj sec dst_ingress
- ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd egress bpf da obj $obj sec chk_egress
-
- if [ "$use_forwarding" -eq "1" ]; then
- # bpf_fib_lookup() checks if forwarding is enabled
- ip netns exec ${NS_FWD} sysctl -w net.ipv4.ip_forward=1
- ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_dst_fwd.forwarding=1
- ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_src_fwd.forwarding=1
- return 0
- fi
-
- veth_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/ifindex)
- veth_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/ifindex)
-
- progs=$(ip netns exec ${NS_FWD} bpftool net --json | jq -r '.[] | .tc | map(.id) | .[]')
- for prog in $progs; do
- map=$(bpftool prog show id $prog --json | jq -r '.map_ids | .? | .[]')
- if [ ! -z "$map" ]; then
- bpftool map update id $map key hex $(hex_mem_str 0) value hex $(hex_mem_str $veth_src)
- bpftool map update id $map key hex $(hex_mem_str 1) value hex $(hex_mem_str $veth_dst)
- fi
- done
-}
-
-trap netns_cleanup EXIT
-set -e
-
-netns_setup
-netns_setup_bpf test_tc_neigh.o
-netns_test_connectivity
-netns_cleanup
-netns_setup
-netns_setup_bpf test_tc_neigh_fib.o 1
-netns_test_connectivity
-netns_cleanup
-netns_setup
-netns_setup_bpf test_tc_peer.o
-netns_test_connectivity
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 1512092e1e68..3a9e332c5e36 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1147,7 +1147,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
}
}
- if (test->insn_processed) {
+ if (!unpriv && test->insn_processed) {
uint32_t insn_processed;
char *proc;
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
index ca8fdb1b3f01..7d7ebee5cc7a 100644
--- a/tools/testing/selftests/bpf/verifier/and.c
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -61,6 +61,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R1 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0
},
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index 8a1caf46ffbc..e061e8799ce2 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -508,6 +508,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT
},
{
@@ -528,6 +530,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT
},
{
@@ -569,6 +573,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -589,6 +595,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -609,6 +617,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -674,6 +684,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
@@ -695,6 +707,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+ .result_unpriv = REJECT,
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
index 17fe33a75034..2c8935b3e65d 100644
--- a/tools/testing/selftests/bpf/verifier/dead_code.c
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -8,6 +8,8 @@
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index bd5cae4a7f73..1c857b2fbdf0 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -87,6 +87,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -150,6 +152,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -213,6 +217,8 @@
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -280,6 +286,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -348,6 +356,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -416,6 +426,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -484,6 +496,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -552,6 +566,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -620,6 +636,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -688,6 +706,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
@@ -756,6 +776,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
index 8dcd4e0383d5..11fc68da735e 100644
--- a/tools/testing/selftests/bpf/verifier/jset.c
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -82,8 +82,8 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.retval = 1,
.result = ACCEPT,
},
@@ -141,7 +141,8 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
{
@@ -162,6 +163,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
+ .errstr_unpriv = "R9 !read_ok",
+ .result_unpriv = REJECT,
.result = ACCEPT,
},
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
index 07eaa04412ae..8ab94d65f3d5 100644
--- a/tools/testing/selftests/bpf/verifier/stack_ptr.c
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -295,8 +295,6 @@
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid write to stack R1 off=0 size=1",
.result = ACCEPT,
.retval = 42,
},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index bd436df5cc32..111801aea5e3 100644
--- a/tools/testing/selftests/bpf/verifier/unpriv.c
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -420,6 +420,8 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
BPF_EXIT_INSN(),
},
+ .errstr_unpriv = "R7 invalid mem access 'inv'",
+ .result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0,
},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index e5913fd3b903..a3e593ddfafc 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -120,7 +120,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+ .errstr_unpriv = "R2 pointer comparison prohibited",
.retval = 0,
},
{
@@ -159,7 +159,8 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
// fake-dead code; targeted from branch A to
- // prevent dead code sanitization
+ // prevent dead code sanitization, rejected
+ // via branch B however
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -167,7 +168,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+ .errstr_unpriv = "R0 invalid mem access 'inv'",
.retval = 0,
},
{
@@ -300,8 +301,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
@@ -371,8 +370,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
@@ -472,8 +469,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
@@ -766,8 +761,6 @@
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index cf69b2fcce59..dd61118df66e 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
cp $< $@
chmod -x $@
$(OUTPUT)/load_address_4096: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
$(OUTPUT)/load_address_2097152: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
$(OUTPUT)/load_address_16777216: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
index 0efcd494daab..0e78b49d0f2f 100644
--- a/tools/testing/selftests/futex/functional/.gitignore
+++ b/tools/testing/selftests/futex/functional/.gitignore
@@ -6,3 +6,5 @@ futex_wait_private_mapped_file
futex_wait_timeout
futex_wait_uninitialized_heap
futex_wait_wouldblock
+futex_wait
+futex_requeue
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 23207829ec75..bd1fec59e010 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-INCLUDES := -I../include -I../../
+INCLUDES := -I../include -I../../ -I../../../../../usr/include/ \
+ -I$(KBUILD_OUTPUT)/kselftest/usr/include
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
LDLIBS := -lpthread -lrt
@@ -14,7 +15,9 @@ TEST_GEN_FILES := \
futex_requeue_pi_signal_restart \
futex_requeue_pi_mismatched_ops \
futex_wait_uninitialized_heap \
- futex_wait_private_mapped_file
+ futex_wait_private_mapped_file \
+ futex_wait \
+ futex_requeue
TEST_PROGS := run.sh
diff --git a/tools/testing/selftests/futex/functional/futex_requeue.c b/tools/testing/selftests/futex/functional/futex_requeue.c
new file mode 100644
index 000000000000..51485be6eb2f
--- /dev/null
+++ b/tools/testing/selftests/futex/functional/futex_requeue.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright Collabora Ltd., 2021
+ *
+ * futex cmp requeue test by André Almeida <andrealmeid@collabora.com>
+ */
+
+#include <pthread.h>
+#include <limits.h>
+#include "logging.h"
+#include "futextest.h"
+
+#define TEST_NAME "futex-requeue"
+#define timeout_ns 30000000
+#define WAKE_WAIT_US 10000
+
+volatile futex_t *f1;
+
+void usage(char *prog)
+{
+ printf("Usage: %s\n", prog);
+ printf(" -c Use color\n");
+ printf(" -h Display this help message\n");
+ printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
+ VQUIET, VCRITICAL, VINFO);
+}
+
+void *waiterfn(void *arg)
+{
+ struct timespec to;
+
+ to.tv_sec = 0;
+ to.tv_nsec = timeout_ns;
+
+ if (futex_wait(f1, *f1, &to, 0))
+ printf("waiter failed errno %d\n", errno);
+
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ pthread_t waiter[10];
+ int res, ret = RET_PASS;
+ int c, i;
+ volatile futex_t _f1 = 0;
+ volatile futex_t f2 = 0;
+
+ f1 = &_f1;
+
+ while ((c = getopt(argc, argv, "cht:v:")) != -1) {
+ switch (c) {
+ case 'c':
+ log_color(1);
+ break;
+ case 'h':
+ usage(basename(argv[0]));
+ exit(0);
+ case 'v':
+ log_verbosity(atoi(optarg));
+ break;
+ default:
+ usage(basename(argv[0]));
+ exit(1);
+ }
+ }
+
+ ksft_print_header();
+ ksft_set_plan(2);
+ ksft_print_msg("%s: Test futex_requeue\n",
+ basename(argv[0]));
+
+ /*
+ * Requeue a waiter from f1 to f2, and wake f2.
+ */
+ if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Requeuing 1 futex from f1 to f2\n");
+ res = futex_cmp_requeue(f1, 0, &f2, 0, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ }
+
+
+ info("Waking 1 futex at f2\n");
+ res = futex_wake(&f2, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_requeue simple succeeds\n");
+ }
+
+
+ /*
+ * Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
+ * At futex_wake, wake INT_MAX (should be exactly 7).
+ */
+ for (i = 0; i < 10; i++) {
+ if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+ }
+
+ usleep(WAKE_WAIT_US);
+
+ info("Waking 3 futexes at f1 and requeuing 7 futexes from f1 to f2\n");
+ res = futex_cmp_requeue(f1, 0, &f2, 3, 7, 0);
+ if (res != 10) {
+ ksft_test_result_fail("futex_requeue many returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ }
+
+ info("Waking INT_MAX futexes at f2\n");
+ res = futex_wake(&f2, INT_MAX, 0);
+ if (res != 7) {
+ ksft_test_result_fail("futex_requeue many returned: %d %s\n",
+ res ? errno : res,
+ res ? strerror(errno) : "");
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_requeue many succeeds\n");
+ }
+
+ ksft_print_cnts();
+ return ret;
+}
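
Both failure checks above rely on futex_cmp_requeue() returning the number of waiters woken plus the number requeued (1 for the first call, 10 for the second). A sketch of the raw syscall behind the futextest.h wrapper, to make the argument layout and the comparison value explicit; the wrapper name here is illustrative:

/* Sketch: raw FUTEX_CMP_REQUEUE call. 'expected' is checked against *uaddr;
 * the kernel returns -EAGAIN on mismatch, otherwise the return value is
 * (waiters woken on uaddr) + (waiters requeued to uaddr2). */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_cmp_requeue_raw(uint32_t *uaddr, uint32_t expected,
				  uint32_t *uaddr2, int nr_wake, int nr_requeue)
{
	return syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE, nr_wake,
		       nr_requeue, uaddr2, expected);
}

/* e.g. futex_cmp_requeue_raw(f1, 0, &f2, 3, 7) should return 10 when ten
 * waiters are blocked on f1, mirroring the second test case above. */
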
diff --git a/tools/testing/selftests/futex/functional/futex_wait.c b/tools/testing/selftests/futex/functional/futex_wait.c
new file mode 100644
index 000000000000..685140d9b93d
--- /dev/null
+++ b/tools/testing/selftests/futex/functional/futex_wait.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright Collabora Ltd., 2021
+ *
+ * futex wait test by André Almeida <andrealmeid@collabora.com>
+ * futex wait test by André Almeida <andrealmeid@collabora.com>
+ */
+
+#include <pthread.h>
+#include <sys/shm.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include "logging.h"
+#include "futextest.h"
+
+#define TEST_NAME "futex-wait"
+#define timeout_ns 30000000
+#define WAKE_WAIT_US 10000
+#define SHM_PATH "futex_shm_file"
+
+void *futex;
+
+void usage(char *prog)
+{
+ printf("Usage: %s\n", prog);
+ printf(" -c Use color\n");
+ printf(" -h Display this help message\n");
+ printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
+ VQUIET, VCRITICAL, VINFO);
+}
+
+static void *waiterfn(void *arg)
+{
+ struct timespec to;
+ unsigned int flags = 0;
+
+ if (arg)
+ flags = *((unsigned int *) arg);
+
+ to.tv_sec = 0;
+ to.tv_nsec = timeout_ns;
+
+ if (futex_wait(futex, 0, &to, flags))
+ printf("waiter failed errno %d\n", errno);
+
+ return NULL;
+}
+
+int main(int argc, char *argv[])
+{
+ int res, ret = RET_PASS, fd, c, shm_id;
+ u_int32_t f_private = 0, *shared_data;
+ unsigned int flags = FUTEX_PRIVATE_FLAG;
+ pthread_t waiter;
+ void *shm;
+
+ futex = &f_private;
+
+ while ((c = getopt(argc, argv, "cht:v:")) != -1) {
+ switch (c) {
+ case 'c':
+ log_color(1);
+ break;
+ case 'h':
+ usage(basename(argv[0]));
+ exit(0);
+ case 'v':
+ log_verbosity(atoi(optarg));
+ break;
+ default:
+ usage(basename(argv[0]));
+ exit(1);
+ }
+ }
+
+ ksft_print_header();
+ ksft_set_plan(3);
+ ksft_print_msg("%s: Test futex_wait\n", basename(argv[0]));
+
+ /* Testing a private futex */
+ info("Calling private futex_wait on futex: %p\n", futex);
+ if (pthread_create(&waiter, NULL, waiterfn, (void *) &flags))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Calling private futex_wake on futex: %p\n", futex);
+ res = futex_wake(futex, 1, FUTEX_PRIVATE_FLAG);
+ if (res != 1) {
+ ksft_test_result_fail("futex_wake private returned: %d %s\n",
+ errno, strerror(errno));
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_wake private succeeds\n");
+ }
+
+ /* Testing an anon page shared memory */
+ shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
+ if (shm_id < 0) {
+ perror("shmget");
+ exit(1);
+ }
+
+ shared_data = shmat(shm_id, NULL, 0);
+
+ *shared_data = 0;
+ futex = shared_data;
+
+ info("Calling shared (page anon) futex_wait on futex: %p\n", futex);
+ if (pthread_create(&waiter, NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Calling shared (page anon) futex_wake on futex: %p\n", futex);
+ res = futex_wake(futex, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_wake shared (page anon) returned: %d %s\n",
+ errno, strerror(errno));
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_wake shared (page anon) succeeds\n");
+ }
+
+
+ /* Testing a file backed shared memory */
+ fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ if (fd < 0) {
+ perror("open");
+ exit(1);
+ }
+
+ if (ftruncate(fd, sizeof(f_private))) {
+ perror("ftruncate");
+ exit(1);
+ }
+
+ shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (shm == MAP_FAILED) {
+ perror("mmap");
+ exit(1);
+ }
+
+ memcpy(shm, &f_private, sizeof(f_private));
+
+ futex = shm;
+
+ info("Calling shared (file backed) futex_wait on futex: %p\n", futex);
+ if (pthread_create(&waiter, NULL, waiterfn, NULL))
+ error("pthread_create failed\n", errno);
+
+ usleep(WAKE_WAIT_US);
+
+ info("Calling shared (file backed) futex_wake on futex: %p\n", futex);
+ res = futex_wake(shm, 1, 0);
+ if (res != 1) {
+ ksft_test_result_fail("futex_wake shared (file backed) returned: %d %s\n",
+ errno, strerror(errno));
+ ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("futex_wake shared (file backed) succeeds\n");
+ }
+
+ /* Freeing resources */
+ shmdt(shared_data);
+ munmap(shm, sizeof(f_private));
+ remove(SHM_PATH);
+ close(fd);
+
+ ksft_print_cnts();
+ return ret;
+}
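
The three cases above differ only in how the futex word is mapped: a stack variable addressed with FUTEX_PRIVATE_FLAG (keyed by mm and virtual address), a SysV shm segment, and a file-backed MAP_SHARED mapping, the latter two taking the shared-key path so that tasks which do not share an address space can still wait on and wake the same word. A compact sketch of the wait/wake calls the test drives, written against the raw syscall and mirroring what the futextest.h helpers are assumed to do:

/* Sketch: FUTEX_WAIT blocks while *uaddr == val; FUTEX_WAKE wakes up to nr
 * waiters. Pass FUTEX_PRIVATE_FLAG in flags only when every waiter and
 * waker shares one address space. */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long futex_wait_raw(uint32_t *uaddr, uint32_t val,
			   const struct timespec *to, int flags)
{
	/* For plain FUTEX_WAIT the timeout is relative. */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT | flags, val, to, NULL, 0);
}

static long futex_wake_raw(uint32_t *uaddr, int nr, int flags)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE | flags, nr, NULL, NULL, 0);
}
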
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index ee55e6d389a3..1f8f6daaf1e7 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -11,21 +11,18 @@
*
* HISTORY
* 2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
+ * 2021-Apr-26: More test cases by André Almeida <andrealmeid@collabora.com>
*
*****************************************************************************/
-#include <errno.h>
-#include <getopt.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
+#include <pthread.h>
#include "futextest.h"
#include "logging.h"
#define TEST_NAME "futex-wait-timeout"
static long timeout_ns = 100000; /* 100us default timeout */
+static futex_t futex_pi;
void usage(char *prog)
{
@@ -37,11 +34,67 @@ void usage(char *prog)
VQUIET, VCRITICAL, VINFO);
}
+/*
+ * Get a PI lock and hold it forever, so the main thread lock_pi will block
+ * and we can test the timeout
+ */
+void *get_pi_lock(void *arg)
+{
+ int ret;
+ volatile futex_t lock = 0;
+
+ ret = futex_lock_pi(&futex_pi, NULL, 0, 0);
+ if (ret != 0)
+ error("futex_lock_pi failed\n", ret);
+
+ /* Blocks forever */
+ ret = futex_wait(&lock, 0, NULL, 0);
+ error("futex_wait failed\n", ret);
+
+ return NULL;
+}
+
+/*
+ * Check if the function returned the expected error
+ */
+static void test_timeout(int res, int *ret, char *test_name, int err)
+{
+ if (!res || errno != err) {
+ ksft_test_result_fail("%s returned %d\n", test_name,
+ res < 0 ? errno : res);
+ *ret = RET_FAIL;
+ } else {
+ ksft_test_result_pass("%s succeeds\n", test_name);
+ }
+}
+
+/*
+ * Calculate absolute timeout and correct overflow
+ */
+static int futex_get_abs_timeout(clockid_t clockid, struct timespec *to,
+ long timeout_ns)
+{
+ if (clock_gettime(clockid, to)) {
+ error("clock_gettime failed\n", errno);
+ return errno;
+ }
+
+ to->tv_nsec += timeout_ns;
+
+ if (to->tv_nsec >= 1000000000) {
+ to->tv_sec++;
+ to->tv_nsec -= 1000000000;
+ }
+
+ return 0;
+}
+
int main(int argc, char *argv[])
{
futex_t f1 = FUTEX_INITIALIZER;
- struct timespec to;
int res, ret = RET_PASS;
+ struct timespec to;
+ pthread_t thread;
int c;
while ((c = getopt(argc, argv, "cht:v:")) != -1) {
@@ -65,22 +118,63 @@ int main(int argc, char *argv[])
}
ksft_print_header();
- ksft_set_plan(1);
+ ksft_set_plan(7);
ksft_print_msg("%s: Block on a futex and wait for timeout\n",
basename(argv[0]));
ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
- /* initialize timeout */
+ pthread_create(&thread, NULL, get_pi_lock, NULL);
+
+ /* initialize relative timeout */
to.tv_sec = 0;
to.tv_nsec = timeout_ns;
- info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
- res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
- if (!res || errno != ETIMEDOUT) {
- fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
- ret = RET_FAIL;
- }
+ res = futex_wait(&f1, f1, &to, 0);
+ test_timeout(res, &ret, "futex_wait relative", ETIMEDOUT);
+
+ /* FUTEX_WAIT_BITSET with CLOCK_REALTIME */
+ if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_bitset(&f1, f1, &to, 1, FUTEX_CLOCK_REALTIME);
+ test_timeout(res, &ret, "futex_wait_bitset realtime", ETIMEDOUT);
+
+ /* FUTEX_WAIT_BITSET with CLOCK_MONOTONIC */
+ if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_bitset(&f1, f1, &to, 1, 0);
+ test_timeout(res, &ret, "futex_wait_bitset monotonic", ETIMEDOUT);
+
+ /* FUTEX_WAIT_REQUEUE_PI with CLOCK_REALTIME */
+ if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, FUTEX_CLOCK_REALTIME);
+ test_timeout(res, &ret, "futex_wait_requeue_pi realtime", ETIMEDOUT);
+
+ /* FUTEX_WAIT_REQUEUE_PI with CLOCK_MONOTONIC */
+ if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
+ test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+
+ /*
+ * FUTEX_LOCK_PI with CLOCK_REALTIME
+ * For historical reasons, FUTEX_LOCK_PI supports only the realtime
+ * clock, but requires the caller not to set the FUTEX_CLOCK_REALTIME flag.
+ *
+ * If you call FUTEX_LOCK_PI with a monotonic clock, it'll be
+ * interpreted as a realtime clock, and (unless you mess your machine's
+ * time or your time machine) the monotonic clock value is always
+ * smaller than realtime and the syscall will timeout immediately.
+ */
+ if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+ return RET_FAIL;
+ res = futex_lock_pi(&futex_pi, &to, 0, 0);
+ test_timeout(res, &ret, "futex_lock_pi realtime", ETIMEDOUT);
+
+ /* Test operations that don't support FUTEX_CLOCK_REALTIME */
+ res = futex_lock_pi(&futex_pi, NULL, 0, FUTEX_CLOCK_REALTIME);
+ test_timeout(res, &ret, "futex_lock_pi invalid timeout flag", ENOSYS);
- print_result(TEST_NAME, ret);
+ ksft_print_cnts();
return ret;
}
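
The absolute-deadline helper above is needed because plain FUTEX_WAIT takes a relative timeout, while FUTEX_WAIT_BITSET, FUTEX_WAIT_REQUEUE_PI and FUTEX_LOCK_PI take an absolute one: measured against CLOCK_MONOTONIC by default, or CLOCK_REALTIME when FUTEX_CLOCK_REALTIME is passed (FUTEX_LOCK_PI being the realtime-only exception described in the comment). A small sketch of an absolute-deadline wait, equivalent in spirit to the futex_get_abs_timeout() plus futex_wait_bitset() pair used above:

/* Sketch: absolute-deadline wait via FUTEX_WAIT_BITSET. The deadline is
 * "now + timeout_ns" on the chosen clock, with tv_nsec kept below 1e9. */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long wait_abs(uint32_t *uaddr, uint32_t val, long timeout_ns, int realtime)
{
	clockid_t clk = realtime ? CLOCK_REALTIME : CLOCK_MONOTONIC;
	int flags = realtime ? FUTEX_CLOCK_REALTIME : 0;
	struct timespec to;

	clock_gettime(clk, &to);
	to.tv_nsec += timeout_ns;
	to.tv_sec += to.tv_nsec / 1000000000L;
	to.tv_nsec %= 1000000000L;

	/* val3 = FUTEX_BITSET_MATCH_ANY wakes on any FUTEX_WAKE. */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET | flags, val, &to,
		       NULL, FUTEX_BITSET_MATCH_ANY);
}
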
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index 1acb6ace1680..11a9d62290f5 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -73,3 +73,9 @@ echo
echo
./futex_wait_uninitialized_heap $COLOR
./futex_wait_private_mapped_file $COLOR
+
+echo
+./futex_wait $COLOR
+
+echo
+./futex_requeue $COLOR
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index bd83158e0e0b..524c857a049c 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -41,5 +41,6 @@
/kvm_create_max_vcpus
/kvm_page_table_test
/memslot_modification_stress_test
+/memslot_perf_test
/set_memory_region_test
/steal_time
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index e439d027939d..daaee1888b12 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
UNAME_M := s390x
endif
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
@@ -74,6 +74,7 @@ TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
+TEST_GEN_PROGS_x86_64 += memslot_perf_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 5f7a229c3af1..b74704305835 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -9,6 +9,7 @@
#define _GNU_SOURCE /* for pipe2 */
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
@@ -38,6 +39,7 @@
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static size_t demand_paging_size;
static char *guest_data_prototype;
static void *vcpu_worker(void *data)
@@ -71,36 +73,51 @@ static void *vcpu_worker(void *data)
return NULL;
}
-static int handle_uffd_page_request(int uffd, uint64_t addr)
+static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
{
- pid_t tid;
+ pid_t tid = syscall(__NR_gettid);
struct timespec start;
struct timespec ts_diff;
- struct uffdio_copy copy;
int r;
- tid = syscall(__NR_gettid);
+ clock_gettime(CLOCK_MONOTONIC, &start);
- copy.src = (uint64_t)guest_data_prototype;
- copy.dst = addr;
- copy.len = perf_test_args.host_page_size;
- copy.mode = 0;
+ if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
+ struct uffdio_copy copy;
- clock_gettime(CLOCK_MONOTONIC, &start);
+ copy.src = (uint64_t)guest_data_prototype;
+ copy.dst = addr;
+ copy.len = demand_paging_size;
+ copy.mode = 0;
- r = ioctl(uffd, UFFDIO_COPY, &copy);
- if (r == -1) {
- pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n",
- addr, tid, errno);
- return r;
+ r = ioctl(uffd, UFFDIO_COPY, &copy);
+ if (r == -1) {
+ pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n",
+ addr, tid, errno);
+ return r;
+ }
+ } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+ struct uffdio_continue cont = {0};
+
+ cont.range.start = addr;
+ cont.range.len = demand_paging_size;
+
+ r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
+ if (r == -1) {
+ pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n",
+ addr, tid, errno);
+ return r;
+ }
+ } else {
+ TEST_FAIL("Invalid uffd mode %d", uffd_mode);
}
ts_diff = timespec_elapsed(start);
- PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
+ PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
timespec_to_ns(ts_diff));
PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
- perf_test_args.host_page_size, addr, tid);
+ demand_paging_size, addr, tid);
return 0;
}
@@ -108,6 +125,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
bool quit_uffd_thread;
struct uffd_handler_args {
+ int uffd_mode;
int uffd;
int pipefd;
useconds_t delay;
@@ -169,7 +187,7 @@ static void *uffd_handler_thread_fn(void *arg)
if (r == -1) {
if (errno == EAGAIN)
continue;
- pr_info("Read of uffd gor errno %d", errno);
+ pr_info("Read of uffd got errno %d\n", errno);
return NULL;
}
@@ -184,7 +202,7 @@ static void *uffd_handler_thread_fn(void *arg)
if (delay)
usleep(delay);
addr = msg.arg.pagefault.address;
- r = handle_uffd_page_request(uffd, addr);
+ r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr);
if (r < 0)
return NULL;
pages++;
@@ -198,43 +216,53 @@ static void *uffd_handler_thread_fn(void *arg)
return NULL;
}
-static int setup_demand_paging(struct kvm_vm *vm,
- pthread_t *uffd_handler_thread, int pipefd,
- useconds_t uffd_delay,
- struct uffd_handler_args *uffd_args,
- void *hva, uint64_t len)
+static void setup_demand_paging(struct kvm_vm *vm,
+ pthread_t *uffd_handler_thread, int pipefd,
+ int uffd_mode, useconds_t uffd_delay,
+ struct uffd_handler_args *uffd_args,
+ void *hva, void *alias, uint64_t len)
{
+ bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
int uffd;
struct uffdio_api uffdio_api;
struct uffdio_register uffdio_register;
+ uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
- uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- if (uffd == -1) {
- pr_info("uffd creation failed\n");
- return -1;
+ PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
+ is_minor ? "MINOR" : "MISSING",
+ is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
+
+ /* In order to get minor faults, prefault via the alias. */
+ if (is_minor) {
+ size_t p;
+
+ expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+
+ TEST_ASSERT(alias != NULL, "Alias required for minor faults");
+ for (p = 0; p < (len / demand_paging_size); ++p) {
+ memcpy(alias + (p * demand_paging_size),
+ guest_data_prototype, demand_paging_size);
+ }
}
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
+
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
- if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
- pr_info("ioctl uffdio_api failed\n");
- return -1;
- }
+ TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
+ "ioctl UFFDIO_API failed: %" PRIu64,
+ (uint64_t)uffdio_api.api);
uffdio_register.range.start = (uint64_t)hva;
uffdio_register.range.len = len;
- uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
- pr_info("ioctl uffdio_register failed\n");
- return -1;
- }
-
- if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
- UFFD_API_RANGE_IOCTLS) {
- pr_info("unexpected userfaultfd ioctl set\n");
- return -1;
- }
+ uffdio_register.mode = uffd_mode;
+ TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
+ "ioctl UFFDIO_REGISTER failed");
+ TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
+ expected_ioctls, "missing userfaultfd ioctls");
+ uffd_args->uffd_mode = uffd_mode;
uffd_args->uffd = uffd;
uffd_args->pipefd = pipefd;
uffd_args->delay = uffd_delay;
@@ -243,13 +271,12 @@ static int setup_demand_paging(struct kvm_vm *vm,
PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
hva, hva + len);
-
- return 0;
}
struct test_params {
- bool use_uffd;
+ int uffd_mode;
useconds_t uffd_delay;
+ enum vm_mem_backing_src_type src_type;
bool partition_vcpu_memory_access;
};
@@ -267,14 +294,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
int r;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
- VM_MEM_SRC_ANONYMOUS);
+ p->src_type);
perf_test_args.wr_fract = 1;
- guest_data_prototype = malloc(perf_test_args.host_page_size);
+ demand_paging_size = get_backing_src_pagesz(p->src_type);
+
+ guest_data_prototype = malloc(demand_paging_size);
TEST_ASSERT(guest_data_prototype,
"Failed to allocate buffer for guest data pattern");
- memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size);
+ memset(guest_data_prototype, 0xAB, demand_paging_size);
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
@@ -282,7 +311,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
p->partition_vcpu_memory_access);
- if (p->use_uffd) {
+ if (p->uffd_mode) {
uffd_handler_threads =
malloc(nr_vcpus * sizeof(*uffd_handler_threads));
TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -296,6 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
vm_paddr_t vcpu_gpa;
void *vcpu_hva;
+ void *vcpu_alias;
uint64_t vcpu_mem_size;
@@ -310,8 +340,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
- /* Cache the HVA pointer of the region */
+ /* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+ vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
/*
* Set up user fault fd to handle demand paging
@@ -321,13 +352,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(!r, "Failed to set up pipefd");
- r = setup_demand_paging(vm,
- &uffd_handler_threads[vcpu_id],
- pipefds[vcpu_id * 2],
- p->uffd_delay, &uffd_args[vcpu_id],
- vcpu_hva, vcpu_mem_size);
- if (r < 0)
- exit(-r);
+ setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
+ pipefds[vcpu_id * 2], p->uffd_mode,
+ p->uffd_delay, &uffd_args[vcpu_id],
+ vcpu_hva, vcpu_alias,
+ vcpu_mem_size);
}
}
@@ -355,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("All vCPU threads joined\n");
- if (p->use_uffd) {
+ if (p->uffd_mode) {
char c;
/* Tell the user fault fd handler threads to quit */
@@ -377,7 +406,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
free(guest_data_prototype);
free(vcpu_threads);
- if (p->use_uffd) {
+ if (p->uffd_mode) {
free(uffd_handler_threads);
free(uffd_args);
free(pipefds);
@@ -387,17 +416,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
- " [-b memory] [-v vcpus] [-o]\n", name);
+ printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
+ " [-b memory] [-t type] [-v vcpus] [-o]\n", name);
guest_modes_help();
- printf(" -u: use User Fault FD to handle vCPU page\n"
- " faults.\n");
+ printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
+ " UFFD registration mode: 'MISSING' or 'MINOR'.\n");
printf(" -d: add a delay in usec to the User Fault\n"
" FD handler to simulate demand paging\n"
" overheads. Ignored without -u.\n");
printf(" -b: specify the size of the memory region which should be\n"
" demand paged by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
+ printf(" -t: The type of backing memory to use. Default: anonymous\n");
+ backing_src_help();
printf(" -v: specify the number of vCPUs to run.\n");
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
" them into a separate region of memory for each vCPU.\n");
@@ -409,19 +440,24 @@ int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
struct test_params p = {
+ .src_type = VM_MEM_SRC_ANONYMOUS,
.partition_vcpu_memory_access = true,
};
int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
break;
case 'u':
- p.use_uffd = true;
+ if (!strcmp("MISSING", optarg))
+ p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
+ else if (!strcmp("MINOR", optarg))
+ p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
+ TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
break;
case 'd':
p.uffd_delay = strtoul(optarg, NULL, 0);
@@ -430,6 +466,9 @@ int main(int argc, char *argv[])
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
+ case 't':
+ p.src_type = parse_backing_src_type(optarg);
+ break;
case 'v':
nr_vcpus = atoi(optarg);
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
@@ -445,6 +484,11 @@ int main(int argc, char *argv[])
}
}
+ if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
+ !backing_src_is_shared(p.src_type)) {
+ TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+ }
+
for_each_guest_mode(run_test, &p);
return 0;
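
For readers new to the mechanism being benchmarked: MISSING mode resolves a fault by copying data into the not-yet-present page (UFFDIO_COPY), while MINOR mode assumes the page cache is already populated through a second mapping and only asks the kernel to install the page tables (UFFDIO_CONTINUE), which is why the test prefaults through the alias and insists on a shared backing type. A stripped-down MISSING-mode sketch, single page and almost no error handling; note that on recent kernels unprivileged use may also require vm.unprivileged_userfaultfd=1:

/* Sketch: minimal userfaultfd MISSING-mode handler. One anonymous page is
 * registered; a helper thread touches it, and the main thread resolves the
 * resulting fault with UFFDIO_COPY. Build with -pthread. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *toucher(void *addr)
{
	/* This read faults and blocks until UFFDIO_COPY below resolves it. */
	printf("first byte: 0x%02x\n", *(volatile unsigned char *)addr);
	return NULL;
}

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	struct uffdio_copy copy;
	struct uffd_msg msg;
	pthread_t t;
	void *area, *src;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	ioctl(uffd, UFFDIO_API, &api);

	area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	reg.range.start = (unsigned long)area;
	reg.range.len = page_size;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	pthread_create(&t, NULL, toucher, area);

	read(uffd, &msg, sizeof(msg));		/* blocks until the fault */
	src = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(src, 0xAB, page_size);

	copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
	copy.src = (unsigned long)src;
	copy.len = page_size;
	copy.mode = 0;
	ioctl(uffd, UFFDIO_COPY, &copy);	/* fills the page, wakes the thread */

	pthread_join(t, NULL);
	return 0;
}
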
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index 5aadf84c91c0..4b8db3bce610 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -132,6 +132,36 @@ static void run_test(uint32_t run)
TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
}
+void wait_for_child_setup(pid_t pid)
+{
+ /*
+ * Wait for the child to post to the semaphore, but wake up periodically
+ * to check if the child exited prematurely.
+ */
+ for (;;) {
+ const struct timespec wait_period = { .tv_sec = 1 };
+ int status;
+
+ if (!sem_timedwait(sem, &wait_period))
+ return;
+
+ /* Child is still running, keep waiting. */
+ if (pid != waitpid(pid, &status, WNOHANG))
+ continue;
+
+ /*
+ * Child is no longer running, which is not expected.
+ *
+ * If it exited with a non-zero status, we explicitly forward
+ * the child's status in case it exited with KSFT_SKIP.
+ */
+ if (WIFEXITED(status))
+ exit(WEXITSTATUS(status));
+ else
+ TEST_ASSERT(false, "Child exited unexpectedly");
+ }
+}
+
int main(int argc, char **argv)
{
uint32_t i;
@@ -148,7 +178,7 @@ int main(int argc, char **argv)
run_test(i); /* This function always exits */
pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
- sem_wait(sem);
+ wait_for_child_setup(pid);
r = (rand() % DELAY_US_MAX) + 1;
pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
usleep(r);
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a8f022794ce3..35739567189e 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -43,6 +43,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_4K,
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
+ VM_MODE_P47V64_4K,
NUM_VM_MODES,
};
@@ -60,7 +61,7 @@ enum vm_guest_mode {
#elif defined(__s390x__)
-#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
+#define VM_MODE_DEFAULT VM_MODE_P47V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)
@@ -77,6 +78,7 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];
+int open_kvm_dev_path_or_exit(void);
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
@@ -146,6 +148,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
/*
* Address Guest Virtual to Guest Physical
@@ -283,10 +286,11 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
uint32_t num_percpu_pages, void *guest_code,
uint32_t vcpuids[]);
-/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t extra_mem_pages, uint32_t num_percpu_pages,
- void *guest_code, uint32_t vcpuids[]);
+ uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[]);
/*
* Adds a vCPU with reasonable defaults (e.g. a stack)
@@ -302,7 +306,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
-unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+uint64_t vm_get_max_gfn(struct kvm_vm *vm);
int vm_get_fd(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index fade3130eb01..d79be15dd3d2 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -17,6 +17,7 @@
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
+#include <sys/mman.h>
#include "kselftest.h"
static inline int _no_printf(const char *format, ...) { return 0; }
@@ -84,6 +85,8 @@ enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB,
VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB,
VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
+ VM_MEM_SRC_SHMEM,
+ VM_MEM_SRC_SHARED_HUGETLB,
NUM_SRC_TYPES,
};
@@ -100,4 +103,13 @@ size_t get_backing_src_pagesz(uint32_t i);
void backing_src_help(void);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+/*
+ * Whether or not the given source type is shared memory (as opposed to
+ * anonymous).
+ */
+static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
+{
+ return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
+}
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 1c4753fff19e..82171f17c1d7 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -268,7 +268,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
/* Create a VM with enough guest pages */
guest_num_pages = test_mem_size / guest_page_size;
- vm = vm_create_with_vcpus(mode, nr_vcpus,
+ vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
guest_num_pages, 0, guest_code, NULL);
/* Align down GPA of the testing memslot */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index fc83f6c5902d..a2b732cf96ea 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -32,6 +32,34 @@ static void *align(void *x, size_t size)
}
/*
+ * Open KVM_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ * flags - The flags to pass when opening KVM_DEV_PATH.
+ *
+ * Return:
+ * The opened file descriptor of /dev/kvm.
+ */
+static int _open_kvm_dev_path_or_exit(int flags)
+{
+ int fd;
+
+ fd = open(KVM_DEV_PATH, flags);
+ if (fd < 0) {
+ print_skip("%s not available, is KVM loaded? (errno: %d)",
+ KVM_DEV_PATH, errno);
+ exit(KSFT_SKIP);
+ }
+
+ return fd;
+}
+
+int open_kvm_dev_path_or_exit(void)
+{
+ return _open_kvm_dev_path_or_exit(O_RDONLY);
+}
+
+/*
* Capability
*
* Input Args:
@@ -52,12 +80,9 @@ int kvm_check_cap(long cap)
int ret;
int kvm_fd;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
-
+ kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
- TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
+ TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
" rc: %i errno: %i", ret, errno);
close(kvm_fd);
@@ -128,9 +153,7 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
static void vm_open(struct kvm_vm *vm, int perm)
{
- vm->kvm_fd = open(KVM_DEV_PATH, perm);
- if (vm->kvm_fd < 0)
- exit(KSFT_SKIP);
+ vm->kvm_fd = _open_kvm_dev_path_or_exit(perm);
if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
print_skip("immediate_exit not available");
@@ -152,6 +175,7 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
+ [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
@@ -169,6 +193,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
{ 40, 48, 0x1000, 12 },
{ 40, 48, 0x10000, 16 },
{ 0, 0, 0x1000, 12 },
+ { 47, 64, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
@@ -203,7 +228,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_ASSERT(vm != NULL, "Insufficient Memory");
INIT_LIST_HEAD(&vm->vcpus);
- INIT_LIST_HEAD(&vm->userspace_mem_regions);
+ vm->regions.gpa_tree = RB_ROOT;
+ vm->regions.hva_tree = RB_ROOT;
+ hash_init(vm->regions.slot_hash);
vm->mode = mode;
vm->type = 0;
@@ -252,6 +279,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
break;
+ case VM_MODE_P47V64_4K:
+ vm->pgtable_levels = 5;
+ break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
@@ -283,21 +313,50 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
+/*
+ * VM Create with customized parameters
+ *
+ * Input Args:
+ * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
+ * nr_vcpus - VCPU count
+ * slot0_mem_pages - Slot0 physical memory size
+ * extra_mem_pages - Non-slot0 physical memory total size
+ * num_percpu_pages - Per-cpu physical memory pages
+ * guest_code - Guest entry point
+ * vcpuids - VCPU IDs
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Pointer to opaque structure that describes the created VM.
+ *
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
+ * with customized slot0 memory size, at least 512 pages currently.
+ * extra_mem_pages is only used to calculate the maximum page table size,
+ * no real memory allocation for non-slot0 memory in this function.
+ */
struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t extra_mem_pages, uint32_t num_percpu_pages,
- void *guest_code, uint32_t vcpuids[])
+ uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[])
{
+ uint64_t vcpu_pages, extra_pg_pages, pages;
+ struct kvm_vm *vm;
+ int i;
+
+ /* Force slot0 memory size to be at least DEFAULT_GUEST_PHY_PAGES */
+ if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
+ slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
+
/* The maximum page table size for a memory region will be when the
* smallest pages are used. Considering each page contains x page
* table descriptors, the total extra size for page tables (for extra
* N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
* than N/x*2.
*/
- uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
- uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
- uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
- struct kvm_vm *vm;
- int i;
+ vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+ extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+ pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
"nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -329,8 +388,8 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
uint32_t num_percpu_pages, void *guest_code,
uint32_t vcpuids[])
{
- return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
- num_percpu_pages, guest_code, vcpuids);
+ return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+ extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
}
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
@@ -355,13 +414,14 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
*/
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
+ int ctr;
struct userspace_mem_region *region;
vm_open(vmp, perm);
if (vmp->has_irqchip)
vm_create_irqchip(vmp);
- list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
+ hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i\n"
@@ -424,14 +484,21 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
- struct userspace_mem_region *region;
+ struct rb_node *node;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ for (node = vm->regions.gpa_tree.rb_node; node; ) {
+ struct userspace_mem_region *region =
+ container_of(node, struct userspace_mem_region, gpa_node);
uint64_t existing_start = region->region.guest_phys_addr;
uint64_t existing_end = region->region.guest_phys_addr
+ region->region.memory_size - 1;
if (start <= existing_end && end >= existing_start)
return region;
+
+ if (start < existing_start)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
}
return NULL;
@@ -546,11 +613,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
- struct userspace_mem_region *region)
+ struct userspace_mem_region *region,
+ bool unlink)
{
int ret;
- list_del(&region->list);
+ if (unlink) {
+ rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+ rb_erase(&region->hva_node, &vm->regions.hva_tree);
+ hash_del(&region->slot_node);
+ }
region->region.memory_size = 0;
ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
@@ -569,14 +641,16 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
*/
void kvm_vm_free(struct kvm_vm *vmp)
{
- struct userspace_mem_region *region, *tmp;
+ int ctr;
+ struct hlist_node *node;
+ struct userspace_mem_region *region;
if (vmp == NULL)
return;
/* Free userspace_mem_regions. */
- list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
- __vm_mem_region_delete(vmp, region);
+ hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
+ __vm_mem_region_delete(vmp, region, false);
/* Free sparsebit arrays. */
sparsebit_free(&vmp->vpages_valid);
@@ -658,13 +732,64 @@ int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
return 0;
}
+static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
+ struct userspace_mem_region *region)
+{
+ struct rb_node **cur, *parent;
+
+ for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
+ struct userspace_mem_region *cregion;
+
+ cregion = container_of(*cur, typeof(*cregion), gpa_node);
+ parent = *cur;
+ if (region->region.guest_phys_addr <
+ cregion->region.guest_phys_addr)
+ cur = &(*cur)->rb_left;
+ else {
+ TEST_ASSERT(region->region.guest_phys_addr !=
+ cregion->region.guest_phys_addr,
+ "Duplicate GPA in region tree");
+
+ cur = &(*cur)->rb_right;
+ }
+ }
+
+ rb_link_node(&region->gpa_node, parent, cur);
+ rb_insert_color(&region->gpa_node, gpa_tree);
+}
+
+static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
+ struct userspace_mem_region *region)
+{
+ struct rb_node **cur, *parent;
+
+ for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
+ struct userspace_mem_region *cregion;
+
+ cregion = container_of(*cur, typeof(*cregion), hva_node);
+ parent = *cur;
+ if (region->host_mem < cregion->host_mem)
+ cur = &(*cur)->rb_left;
+ else {
+ TEST_ASSERT(region->host_mem !=
+ cregion->host_mem,
+ "Duplicate HVA in region tree");
+
+ cur = &(*cur)->rb_right;
+ }
+ }
+
+ rb_link_node(&region->hva_node, parent, cur);
+ rb_insert_color(&region->hva_node, hva_tree);
+}
+
/*
* VM Userspace Memory Region Add
*
* Input Args:
* vm - Virtual Machine
- * backing_src - Storage source for this region.
- * NULL to use anonymous memory.
+ * src_type - Storage source for this region.
+ * e.g. VM_MEM_SRC_ANONYMOUS to use anonymous memory.
* guest_paddr - Starting guest physical address
* slot - KVM region slot
* npages - Number of physical pages
@@ -722,7 +847,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
(uint64_t) region->region.memory_size);
/* Confirm no region with the requested slot already exists. */
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+ slot) {
if (region->region.slot != slot)
continue;
@@ -755,11 +881,30 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
if (alignment > 1)
region->mmap_size += alignment;
+ region->fd = -1;
+ if (backing_src_is_shared(src_type)) {
+ int memfd_flags = MFD_CLOEXEC;
+
+ if (src_type == VM_MEM_SRC_SHARED_HUGETLB)
+ memfd_flags |= MFD_HUGETLB;
+
+ region->fd = memfd_create("kvm_selftest", memfd_flags);
+ TEST_ASSERT(region->fd != -1,
+ "memfd_create failed, errno: %i", errno);
+
+ ret = ftruncate(region->fd, region->mmap_size);
+ TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno);
+
+ ret = fallocate(region->fd,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
+ region->mmap_size);
+ TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno);
+ }
+
region->mmap_start = mmap(NULL, region->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS
- | vm_mem_backing_src_alias(src_type)->flag,
- -1, 0);
+ vm_mem_backing_src_alias(src_type)->flag,
+ region->fd, 0);
TEST_ASSERT(region->mmap_start != MAP_FAILED,
"test_malloc failed, mmap_start: %p errno: %i",
region->mmap_start, errno);
@@ -793,8 +938,23 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
ret, errno, slot, flags,
guest_paddr, (uint64_t) region->region.memory_size);
- /* Add to linked-list of memory regions. */
- list_add(&region->list, &vm->userspace_mem_regions);
+ /* Add to quick lookup data structures */
+ vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
+ vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
+ hash_add(vm->regions.slot_hash, &region->slot_node, slot);
+
+ /* If shared memory, create an alias. */
+ if (region->fd >= 0) {
+ region->mmap_alias = mmap(NULL, region->mmap_size,
+ PROT_READ | PROT_WRITE,
+ vm_mem_backing_src_alias(src_type)->flag,
+ region->fd, 0);
+ TEST_ASSERT(region->mmap_alias != MAP_FAILED,
+ "mmap of alias failed, errno: %i", errno);
+
+ /* Align host alias address */
+ region->host_alias = align(region->mmap_alias, alignment);
+ }
}
/*
@@ -817,10 +977,10 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
struct userspace_mem_region *region;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+ memslot)
if (region->region.slot == memslot)
return region;
- }
fprintf(stderr, "No mem region with the requested slot found,\n"
" requested slot: %u\n", memslot);
@@ -905,7 +1065,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*/
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
- __vm_mem_region_delete(vm, memslot2region(vm, slot));
+ __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}
/*
@@ -925,9 +1085,7 @@ static int vcpu_mmap_sz(void)
{
int dev_fd, ret;
- dev_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (dev_fd < 0)
- exit(KSFT_SKIP);
+ dev_fd = open_kvm_dev_path_or_exit();
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
TEST_ASSERT(ret >= sizeof(struct kvm_run),
@@ -1099,6 +1257,9 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm, pgd_memslot);
+ vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
+ KVM_UTIL_MIN_PFN * vm->page_size,
+ data_memslot);
/*
* Find an unused range of virtual page addresses of at least
@@ -1108,11 +1269,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
/* Map the virtual pages. */
for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
- pages--, vaddr += vm->page_size) {
- vm_paddr_t paddr;
-
- paddr = vm_phy_page_alloc(vm,
- KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
+ pages--, vaddr += vm->page_size, paddr += vm->page_size) {
virt_pg_map(vm, vaddr, paddr, pgd_memslot);
@@ -1177,16 +1334,14 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
struct userspace_mem_region *region;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
- if ((gpa >= region->region.guest_phys_addr)
- && (gpa <= (region->region.guest_phys_addr
- + region->region.memory_size - 1)))
- return (void *) ((uintptr_t) region->host_mem
- + (gpa - region->region.guest_phys_addr));
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ if (!region) {
+ TEST_FAIL("No vm physical memory at 0x%lx", gpa);
+ return NULL;
}
- TEST_FAIL("No vm physical memory at 0x%lx", gpa);
- return NULL;
+ return (void *)((uintptr_t)region->host_mem
+ + (gpa - region->region.guest_phys_addr));
}
/*
@@ -1208,15 +1363,22 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
*/
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
- struct userspace_mem_region *region;
+ struct rb_node *node;
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
- if ((hva >= region->host_mem)
- && (hva <= (region->host_mem
- + region->region.memory_size - 1)))
- return (vm_paddr_t) ((uintptr_t)
- region->region.guest_phys_addr
- + (hva - (uintptr_t) region->host_mem));
+ for (node = vm->regions.hva_tree.rb_node; node; ) {
+ struct userspace_mem_region *region =
+ container_of(node, struct userspace_mem_region, hva_node);
+
+ if (hva >= region->host_mem) {
+ if (hva <= (region->host_mem
+ + region->region.memory_size - 1))
+ return (vm_paddr_t)((uintptr_t)
+ region->region.guest_phys_addr
+ + (hva - (uintptr_t)region->host_mem));
+
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
}
TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
@@ -1224,6 +1386,42 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
}
/*
+ * Address VM physical to Host Virtual *alias*.
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * gpa - VM physical address
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Equivalent address within the host virtual *alias* area, or NULL
+ * (without failing the test) if the guest memory is not shared (so
+ * no alias exists).
+ *
+ * When vm_create() and related functions are called with a shared memory
+ * src_type, we also create a writable, shared alias mapping of the
+ * underlying guest memory. This allows the host to manipulate guest memory
+ * without mapping that memory in the guest's address space. And, for
+ * userfaultfd-based demand paging, we can do so without triggering userfaults.
+ */
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+ struct userspace_mem_region *region;
+ uintptr_t offset;
+
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ if (!region)
+ return NULL;
+
+ if (!region->host_alias)
+ return NULL;
+
+ offset = gpa - region->region.guest_phys_addr;
+ return (void *) ((uintptr_t) region->host_alias + offset);
+}
+
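
A small illustrative helper built on the new lookup (an assumption for exposition, not taken from the patch): writing through the alias leaves the primary mapping, and any userfaultfd registered on it, untouched.

/* Sketch only; requires the region to have been added with a shared src_type. */
static void poke_guest_via_alias(struct kvm_vm *vm, vm_paddr_t gpa, uint64_t val)
{
	uint64_t *p = addr_gpa2alias(vm, gpa);

	TEST_ASSERT(p, "No shared alias for GPA 0x%lx", (unsigned long)gpa);
	*p = val;
}
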
+/*
* VM Create IRQ Chip
*
* Input Args:
@@ -1822,6 +2020,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
*/
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
+ int ctr;
struct userspace_mem_region *region;
struct vcpu *vcpu;
@@ -1829,7 +2028,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
fprintf(stream, "%*sMem Regions:\n", indent, "");
- list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+ hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
"host_virt: %p\n", indent + 2, "",
(uint64_t) region->region.guest_phys_addr,
@@ -2015,10 +2214,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
if (vm == NULL) {
/* Ensure that the KVM vendor-specific module is loaded. */
- f = fopen(KVM_DEV_PATH, "r");
- TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
- errno);
- fclose(f);
+ close(open_kvm_dev_path_or_exit());
}
f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
@@ -2041,7 +2237,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
return vm->page_shift;
}
-unsigned int vm_get_max_gfn(struct kvm_vm *vm)
+uint64_t vm_get_max_gfn(struct kvm_vm *vm)
{
return vm->max_gfn;
}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 91ce1b5d480b..a03febc24ba6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -8,6 +8,9 @@
#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
#define SELFTEST_KVM_UTIL_INTERNAL_H
+#include "linux/hashtable.h"
+#include "linux/rbtree.h"
+
#include "sparsebit.h"
struct userspace_mem_region {
@@ -16,9 +19,13 @@ struct userspace_mem_region {
int fd;
off_t offset;
void *host_mem;
+ void *host_alias;
void *mmap_start;
+ void *mmap_alias;
size_t mmap_size;
- struct list_head list;
+ struct rb_node gpa_node;
+ struct rb_node hva_node;
+ struct hlist_node slot_node;
};
struct vcpu {
@@ -31,6 +38,12 @@ struct vcpu {
uint32_t dirty_gfns_count;
};
+struct userspace_mem_regions {
+ struct rb_root gpa_tree;
+ struct rb_root hva_tree;
+ DECLARE_HASHTABLE(slot_hash, 9);
+};
+
struct kvm_vm {
int mode;
unsigned long type;
@@ -43,7 +56,7 @@ struct kvm_vm {
unsigned int va_bits;
uint64_t max_gfn;
struct list_head vcpus;
- struct list_head userspace_mem_regions;
+ struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
bool has_irqchip;
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 81490b9b4e32..7397ca299835 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2020, Google LLC.
*/
+#include <inttypes.h>
#include "kvm_util.h"
#include "perf_test_util.h"
@@ -68,7 +69,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
"Guest memory size is not guest page size aligned.");
- vm = vm_create_with_vcpus(mode, vcpus,
+ vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
0, guest_code, NULL);
@@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
*/
TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
"Requested more guest memory than address space allows.\n"
- " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+ " guest pages: %" PRIx64 " max gfn: %" PRIx64
+ " vcpus: %d wss: %" PRIx64 "]\n",
guest_num_pages, vm_get_max_gfn(vm), vcpus,
vcpu_memory_bytes);
diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c
new file mode 100644
index 000000000000..a703f0194ea3
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/rbtree.c
@@ -0,0 +1 @@
+#include "../../../../lib/rbtree.c"
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 63d2bc7d757b..af1031fed97f 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -166,72 +166,89 @@ size_t get_def_hugetlb_pagesz(void)
return 0;
}
+#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
+#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB)
+
const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
{
static const struct vm_mem_backing_src_alias aliases[] = {
[VM_MEM_SRC_ANONYMOUS] = {
.name = "anonymous",
- .flag = 0,
+ .flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_THP] = {
.name = "anonymous_thp",
- .flag = 0,
+ .flag = ANON_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
.name = "anonymous_hugetlb",
- .flag = MAP_HUGETLB,
+ .flag = ANON_HUGE_FLAGS,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
.name = "anonymous_hugetlb_16kb",
- .flag = MAP_HUGETLB | MAP_HUGE_16KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
.name = "anonymous_hugetlb_64kb",
- .flag = MAP_HUGETLB | MAP_HUGE_64KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
.name = "anonymous_hugetlb_512kb",
- .flag = MAP_HUGETLB | MAP_HUGE_512KB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
.name = "anonymous_hugetlb_1mb",
- .flag = MAP_HUGETLB | MAP_HUGE_1MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
.name = "anonymous_hugetlb_2mb",
- .flag = MAP_HUGETLB | MAP_HUGE_2MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
.name = "anonymous_hugetlb_8mb",
- .flag = MAP_HUGETLB | MAP_HUGE_8MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
.name = "anonymous_hugetlb_16mb",
- .flag = MAP_HUGETLB | MAP_HUGE_16MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
.name = "anonymous_hugetlb_32mb",
- .flag = MAP_HUGETLB | MAP_HUGE_32MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
.name = "anonymous_hugetlb_256mb",
- .flag = MAP_HUGETLB | MAP_HUGE_256MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
.name = "anonymous_hugetlb_512mb",
- .flag = MAP_HUGETLB | MAP_HUGE_512MB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
.name = "anonymous_hugetlb_1gb",
- .flag = MAP_HUGETLB | MAP_HUGE_1GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
.name = "anonymous_hugetlb_2gb",
- .flag = MAP_HUGETLB | MAP_HUGE_2GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB,
},
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
.name = "anonymous_hugetlb_16gb",
- .flag = MAP_HUGETLB | MAP_HUGE_16GB,
+ .flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB,
+ },
+ [VM_MEM_SRC_SHMEM] = {
+ .name = "shmem",
+ .flag = MAP_SHARED,
+ },
+ [VM_MEM_SRC_SHARED_HUGETLB] = {
+ .name = "shared_hugetlb",
+ /*
+ * No MAP_HUGETLB; we use MFD_HUGETLB instead. Since
+ * we're using "file backed" memory, we need to specify
+ * this when the FD is created, not when the area is
+ * mapped.
+ */
+ .flag = MAP_SHARED,
},
};
_Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
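
A standalone sketch of the scheme the shared_hugetlb comment describes (assumed flags and minimal error handling, not lifted from the patch): the huge-page request is attached to the memfd, so the mmap() itself only needs MAP_SHARED.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

/* size must be a multiple of the default huge page size. */
static void *map_shared_hugetlb(size_t size)
{
	int fd = memfd_create("example_hugetlb", MFD_CLOEXEC | MFD_HUGETLB);
	void *mem;

	if (fd < 0)
		return MAP_FAILED;
	if (ftruncate(fd, size)) {
		close(fd);
		return MAP_FAILED;
	}
	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping keeps the backing memory alive */
	return mem;
}
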
@@ -250,10 +267,12 @@ size_t get_backing_src_pagesz(uint32_t i)
switch (i) {
case VM_MEM_SRC_ANONYMOUS:
+ case VM_MEM_SRC_SHMEM:
return getpagesize();
case VM_MEM_SRC_ANONYMOUS_THP:
return get_trans_hugepagesz();
case VM_MEM_SRC_ANONYMOUS_HUGETLB:
+ case VM_MEM_SRC_SHARED_HUGETLB:
return get_def_hugetlb_pagesz();
default:
return MAP_HUGE_PAGE_SIZE(flag);
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index a8906e60a108..efe235044421 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -657,9 +657,7 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
return cpuid;
cpuid = allocate_kvm_cpuid2();
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
@@ -691,9 +689,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
@@ -986,9 +982,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void)
struct kvm_msr_list *list;
int nmsrs, r, kvm_fd;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
nmsrs = kvm_get_num_msrs_fd(kvm_fd);
list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
@@ -1312,9 +1306,7 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
return cpuid;
cpuid = allocate_kvm_cpuid2();
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 6096bf0a5b34..98351ba0933c 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -71,14 +71,22 @@ struct memslot_antagonist_args {
};
static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
- uint64_t nr_modifications, uint64_t gpa)
+ uint64_t nr_modifications)
{
+ const uint64_t pages = 1;
+ uint64_t gpa;
int i;
+ /*
+ * Add the dummy memslot just below the perf_test_util memslot, which is
+ * at the top of the guest physical address space.
+ */
+ gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
+
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
- DUMMY_MEMSLOT_INDEX, 1, 0);
+ DUMMY_MEMSLOT_INDEX, pages, 0);
vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
}
@@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Started all vCPUs\n");
add_remove_memslot(vm, p->memslot_modification_delay,
- p->nr_memslot_modifications,
- guest_test_phys_mem +
- (guest_percpu_mem_size * nr_vcpus) +
- perf_test_args.host_page_size +
- perf_test_args.guest_page_size);
+ p->nr_memslot_modifications);
run_vcpus = false;
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
new file mode 100644
index 000000000000..11239652d805
--- /dev/null
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A memslot-related performance benchmark.
+ *
+ * Copyright (C) 2021 Oracle and/or its affiliates.
+ *
+ * Basic guest setup / host vCPU thread code lifted from set_memory_region_test.
+ */
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#define MEM_SIZE ((512U << 20) + 4096)
+#define MEM_SIZE_PAGES (MEM_SIZE / 4096)
+#define MEM_GPA 0x10000000UL
+#define MEM_AUX_GPA MEM_GPA
+#define MEM_SYNC_GPA MEM_AUX_GPA
+#define MEM_TEST_GPA (MEM_AUX_GPA + 4096)
+#define MEM_TEST_SIZE (MEM_SIZE - 4096)
+static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
+static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
+
+/*
+ * 32 MiB is the maximum size that gets well over 100 iterations on
+ * 509 slots. Considering that each slot needs to have at least one
+ * page, up to 8194 slots in use can then be tested (although with
+ * slightly limited resolution).
+ */
+#define MEM_SIZE_MAP ((32U << 20) + 4096)
+#define MEM_SIZE_MAP_PAGES (MEM_SIZE_MAP / 4096)
+#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - 4096)
+#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
+static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
+
+/*
+ * 128 MiB is the minimum size that fills 32k slots with at least one page
+ * each while still getting 100+ iterations in such a test.
+ */
+#define MEM_TEST_UNMAP_SIZE (128U << 20)
+#define MEM_TEST_UNMAP_SIZE_PAGES (MEM_TEST_UNMAP_SIZE / 4096)
+/* 2 MiB chunk size like a typical huge page */
+#define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12))
+static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
+ "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
+ "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
+ (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
+ "invalid unmap test region size");
+
+/*
+ * For the move active test the middle of the test area is placed on
+ * a memslot boundary: half lies in the memslot being moved, half in
+ * other memslot(s).
+ *
+ * When running this test with 32k memslots (32764, really) each memslot
+ * contains 4 pages.
+ * The last one additionally contains the remaining 21 pages of memory,
+ * for the total size of 25 pages.
+ * Hence, the maximum size here is 50 pages.
+ */
+#define MEM_TEST_MOVE_SIZE_PAGES (50)
+#define MEM_TEST_MOVE_SIZE (MEM_TEST_MOVE_SIZE_PAGES * 4096)
+#define MEM_TEST_MOVE_GPA_DEST (MEM_GPA + MEM_SIZE)
+static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
+ "invalid move test region size");
+
+#define MEM_TEST_VAL_1 0x1122334455667788
+#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00
+
+struct vm_data {
+ struct kvm_vm *vm;
+ pthread_t vcpu_thread;
+ uint32_t nslots;
+ uint64_t npages;
+ uint64_t pages_per_slot;
+ void **hva_slots;
+ bool mmio_ok;
+ uint64_t mmio_gpa_min;
+ uint64_t mmio_gpa_max;
+};
+
+struct sync_area {
+ atomic_bool start_flag;
+ atomic_bool exit_flag;
+ atomic_bool sync_flag;
+ void *move_area_ptr;
+};
+
+/*
+ * Technically, we also need the atomic bool to be address-free, which
+ * is recommended, but not strictly required, by C11 for lockless
+ * implementations.
+ * However, in practice both GCC and Clang fulfill this requirement on
+ * all KVM-supported platforms.
+ */
+static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
+
+static sem_t vcpu_ready;
+
+static bool map_unmap_verify;
+
+static bool verbose;
+#define pr_info_v(...) \
+ do { \
+ if (verbose) \
+ pr_info(__VA_ARGS__); \
+ } while (0)
+
+static void *vcpu_worker(void *data)
+{
+ struct vm_data *vm = data;
+ struct kvm_run *run;
+ struct ucall uc;
+ uint64_t cmd;
+
+ run = vcpu_state(vm->vm, VCPU_ID);
+ while (1) {
+ vcpu_run(vm->vm, VCPU_ID);
+
+ if (run->exit_reason == KVM_EXIT_IO) {
+ cmd = get_ucall(vm->vm, VCPU_ID, &uc);
+ if (cmd != UCALL_SYNC)
+ break;
+
+ sem_post(&vcpu_ready);
+ continue;
+ }
+
+ if (run->exit_reason != KVM_EXIT_MMIO)
+ break;
+
+ TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+ TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
+ TEST_ASSERT(run->mmio.len == 8,
+ "Unexpected exit mmio size = %u", run->mmio.len);
+ TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
+ run->mmio.phys_addr <= vm->mmio_gpa_max,
+ "Unexpected exit mmio address = 0x%llx",
+ run->mmio.phys_addr);
+ }
+
+ if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+ TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+ __FILE__, uc.args[1], uc.args[2]);
+
+ return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+ struct timespec ts;
+
+ TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+ "clock_gettime() failed: %d\n", errno);
+
+ ts.tv_sec += 2;
+ TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+ "sem_timedwait() failed: %d\n", errno);
+}
+
+static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
+{
+ uint64_t gpage, pgoffs;
+ uint32_t slot, slotoffs;
+ void *base;
+
+ TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
+ TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096,
+ "Too high gpa to translate");
+ gpa -= MEM_GPA;
+
+ gpage = gpa / 4096;
+ pgoffs = gpa % 4096;
+ slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
+ slotoffs = gpage - (slot * data->pages_per_slot);
+
+ if (rempages) {
+ uint64_t slotpages;
+
+ if (slot == data->nslots - 1)
+ slotpages = data->npages - slot * data->pages_per_slot;
+ else
+ slotpages = data->pages_per_slot;
+
+ TEST_ASSERT(!pgoffs,
+ "Asking for remaining pages in slot but gpa not page aligned");
+ *rempages = slotpages - slotoffs;
+ }
+
+ base = data->hva_slots[slot];
+ return (uint8_t *)base + slotoffs * 4096 + pgoffs;
+}
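
To see why the min() clamp is needed (illustrative numbers reusing the default layout derived earlier: 4 pages per slot, 32763 data slots, 25 pages in the last one):

/*
 * Guest page 131060 naively maps to slot 131060 / 4 = 32765, but only
 * slots 0..32762 exist; the clamp selects the last slot, giving
 * slotoffs = 131060 - 32762 * 4 = 12, well inside its 25 pages.
 */
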
+
+static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
+{
+ TEST_ASSERT(slot < data->nslots, "Too high slot number");
+
+ return MEM_GPA + slot * data->pages_per_slot * 4096;
+}
+
+static struct vm_data *alloc_vm(void)
+{
+ struct vm_data *data;
+
+ data = malloc(sizeof(*data));
+ TEST_ASSERT(data, "malloc(vmdata) failed");
+
+ data->vm = NULL;
+ data->hva_slots = NULL;
+
+ return data;
+}
+
+static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
+ void *guest_code, uint64_t mempages,
+ struct timespec *slot_runtime)
+{
+ uint32_t max_mem_slots;
+ uint64_t rempages;
+ uint64_t guest_addr;
+ uint32_t slot;
+ struct timespec tstart;
+ struct sync_area *sync;
+
+ max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+ TEST_ASSERT(max_mem_slots > 1,
+ "KVM_CAP_NR_MEMSLOTS should be greater than 1");
+ TEST_ASSERT(nslots > 1 || nslots == -1,
+ "Slot count cap should be greater than 1");
+ if (nslots != -1)
+ max_mem_slots = min(max_mem_slots, (uint32_t)nslots);
+ pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots);
+
+ TEST_ASSERT(mempages > 1,
+ "Can't test without any memory");
+
+ data->npages = mempages;
+ data->nslots = max_mem_slots - 1;
+ data->pages_per_slot = mempages / data->nslots;
+ if (!data->pages_per_slot) {
+ *maxslots = mempages + 1;
+ return false;
+ }
+
+ rempages = mempages % data->nslots;
+ data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
+ TEST_ASSERT(data->hva_slots, "malloc() fail");
+
+ data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+
+ pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
+ max_mem_slots - 1, data->pages_per_slot, rempages);
+
+ clock_gettime(CLOCK_MONOTONIC, &tstart);
+ for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) {
+ uint64_t npages;
+
+ npages = data->pages_per_slot;
+ if (slot == max_mem_slots - 1)
+ npages += rempages;
+
+ vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
+ guest_addr, slot, npages,
+ 0);
+ guest_addr += npages * 4096;
+ }
+ *slot_runtime = timespec_elapsed(tstart);
+
+ for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) {
+ uint64_t npages;
+ uint64_t gpa;
+
+ npages = data->pages_per_slot;
+ if (slot == max_mem_slots - 2)
+ npages += rempages;
+
+ gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr,
+ slot + 1);
+ TEST_ASSERT(gpa == guest_addr,
+ "vm_phy_pages_alloc() failed\n");
+
+ data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr);
+ memset(data->hva_slots[slot], 0, npages * 4096);
+
+ guest_addr += npages * 4096;
+ }
+
+ virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0);
+
+ sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+ atomic_init(&sync->start_flag, false);
+ atomic_init(&sync->exit_flag, false);
+ atomic_init(&sync->sync_flag, false);
+
+ data->mmio_ok = false;
+
+ return true;
+}
+
+static void launch_vm(struct vm_data *data)
+{
+ pr_info_v("Launching the test VM\n");
+
+ pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
+
+ /* Ensure the guest thread is spun up. */
+ wait_for_vcpu();
+}
+
+static void free_vm(struct vm_data *data)
+{
+ kvm_vm_free(data->vm);
+ free(data->hva_slots);
+ free(data);
+}
+
+static void wait_guest_exit(struct vm_data *data)
+{
+ pthread_join(data->vcpu_thread, NULL);
+}
+
+static void let_guest_run(struct sync_area *sync)
+{
+ atomic_store_explicit(&sync->start_flag, true, memory_order_release);
+}
+
+static void guest_spin_until_start(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
+ ;
+}
+
+static void make_guest_exit(struct sync_area *sync)
+{
+ atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
+}
+
+static bool _guest_should_exit(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
+}
+
+#define guest_should_exit() unlikely(_guest_should_exit())
+
+/*
+ * noinline so we can easily see how much time the host spends waiting
+ * for the guest.
+ * For the same reason use alarm() instead of polling clock_gettime()
+ * to implement a wait timeout.
+ */
+static noinline void host_perform_sync(struct sync_area *sync)
+{
+ alarm(2);
+
+ atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
+ while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
+ ;
+
+ alarm(0);
+}
+
+static bool guest_perform_sync(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ bool expected;
+
+ do {
+ if (guest_should_exit())
+ return false;
+
+ expected = true;
+ } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
+ &expected, false,
+ memory_order_acq_rel,
+ memory_order_relaxed));
+
+ return true;
+}
+
+static void guest_code_test_memslot_move(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+ uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
+
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (!guest_should_exit()) {
+ uintptr_t ptr;
+
+ for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
+ ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ /*
+ * No host sync here since the MMIO exits are so expensive
+ * that the host would spend most of its time waiting for
+ * the guest; instead of measuring memslot move performance
+ * we would end up measuring the performance and likelihood
+ * of MMIO exits.
+ */
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_test_memslot_map(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (1) {
+ uintptr_t ptr;
+
+ for (ptr = MEM_TEST_GPA;
+ ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ if (!guest_perform_sync())
+ break;
+
+ for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
+ ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+ if (!guest_perform_sync())
+ break;
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_test_memslot_unmap(void)
+{
+ struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (1) {
+ uintptr_t ptr = MEM_TEST_GPA;
+
+ /*
+ * We can afford to access (map) just a small number of pages
+ * per host sync as otherwise the host will spend
+ * a significant amount of its time waiting for the guest
+ * (instead of doing unmap operations), so this will
+ * effectively turn this test into a map performance test.
+ *
+ * Just access a single page to be on the safe side.
+ */
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ if (!guest_perform_sync())
+ break;
+
+ ptr += MEM_TEST_UNMAP_SIZE / 2;
+ *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+ if (!guest_perform_sync())
+ break;
+ }
+
+ GUEST_DONE();
+}
+
+static void guest_code_test_memslot_rw(void)
+{
+ GUEST_SYNC(0);
+
+ guest_spin_until_start();
+
+ while (1) {
+ uintptr_t ptr;
+
+ for (ptr = MEM_TEST_GPA;
+ ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096)
+ *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+ if (!guest_perform_sync())
+ break;
+
+ for (ptr = MEM_TEST_GPA + 4096 / 2;
+ ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) {
+ uint64_t val = *(uint64_t *)ptr;
+
+ GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
+ *(uint64_t *)ptr = 0;
+ }
+
+ if (!guest_perform_sync())
+ break;
+ }
+
+ GUEST_DONE();
+}
+
+static bool test_memslot_move_prepare(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t *maxslots, bool isactive)
+{
+ uint64_t movesrcgpa, movetestgpa;
+
+ movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+
+ if (isactive) {
+ uint64_t lastpages;
+
+ vm_gpa2hva(data, movesrcgpa, &lastpages);
+ if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) {
+ *maxslots = 0;
+ return false;
+ }
+ }
+
+ movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
+ sync->move_area_ptr = (void *)movetestgpa;
+
+ if (isactive) {
+ data->mmio_ok = true;
+ data->mmio_gpa_min = movesrcgpa;
+ data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
+ }
+
+ return true;
+}
+
+static bool test_memslot_move_prepare_active(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t *maxslots)
+{
+ return test_memslot_move_prepare(data, sync, maxslots, true);
+}
+
+static bool test_memslot_move_prepare_inactive(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t *maxslots)
+{
+ return test_memslot_move_prepare(data, sync, maxslots, false);
+}
+
+static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
+{
+ uint64_t movesrcgpa;
+
+ movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+ vm_mem_region_move(data->vm, data->nslots - 1 + 1,
+ MEM_TEST_MOVE_GPA_DEST);
+ vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
+}
+
+static void test_memslot_do_unmap(struct vm_data *data,
+ uint64_t offsp, uint64_t count)
+{
+ uint64_t gpa, ctr;
+
+ for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) {
+ uint64_t npages;
+ void *hva;
+ int ret;
+
+ hva = vm_gpa2hva(data, gpa, &npages);
+ TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
+ npages = min(npages, count - ctr);
+ ret = madvise(hva, npages * 4096, MADV_DONTNEED);
+ TEST_ASSERT(!ret,
+ "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
+ hva, gpa);
+ ctr += npages;
+ gpa += npages * 4096;
+ }
+ TEST_ASSERT(ctr == count,
+ "madvise(MADV_DONTNEED) should exactly cover all of the requested area");
+}
+
+static void test_memslot_map_unmap_check(struct vm_data *data,
+ uint64_t offsp, uint64_t valexp)
+{
+ uint64_t gpa;
+ uint64_t *val;
+
+ if (!map_unmap_verify)
+ return;
+
+ gpa = MEM_TEST_GPA + offsp * 4096;
+ val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
+ TEST_ASSERT(*val == valexp,
+ "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
+ *val, valexp, gpa);
+ *val = 0;
+}
+
+static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
+{
+ /*
+ * Unmap the second half of the test area while guest writes to (maps)
+ * the first half.
+ */
+ test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+ MEM_TEST_MAP_SIZE_PAGES / 2);
+
+ /*
+ * Wait for the guest to finish writing the first half of the test
+ * area, verify the written value on the first and the last page of
+ * this area and then unmap it.
+ * Meanwhile, the guest is writing to (mapping) the second half of
+ * the test area.
+ */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+ test_memslot_map_unmap_check(data,
+ MEM_TEST_MAP_SIZE_PAGES / 2 - 1,
+ MEM_TEST_VAL_1);
+ test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2);
+
+
+ /*
+ * Wait for the guest to finish writing the second half of the test
+ * area and verify the written value on the first and the last page
+ * of this area.
+ * The area will be unmapped at the beginning of the next loop
+ * iteration.
+ * Meanwhile, the guest is writing to (mapping) the first half of
+ * the test area.
+ */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+ MEM_TEST_VAL_2);
+ test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1,
+ MEM_TEST_VAL_2);
+}
+
+static void test_memslot_unmap_loop_common(struct vm_data *data,
+ struct sync_area *sync,
+ uint64_t chunk)
+{
+ uint64_t ctr;
+
+ /*
+ * Wait for the guest to finish mapping page(s) in the first half
+ * of the test area, verify the written value and then perform unmap
+ * of this area.
+ * Meanwhile, the guest is writing to (mapping) page(s) in the second
+ * half of the test area.
+ */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+ for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk)
+ test_memslot_do_unmap(data, ctr, chunk);
+
+ /* Likewise, but for the opposite host / guest areas */
+ host_perform_sync(sync);
+ test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2,
+ MEM_TEST_VAL_2);
+ for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2;
+ ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk)
+ test_memslot_do_unmap(data, ctr, chunk);
+}
+
+static void test_memslot_unmap_loop(struct vm_data *data,
+ struct sync_area *sync)
+{
+ test_memslot_unmap_loop_common(data, sync, 1);
+}
+
+static void test_memslot_unmap_loop_chunked(struct vm_data *data,
+ struct sync_area *sync)
+{
+ test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES);
+}
+
+static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
+{
+ uint64_t gptr;
+
+ for (gptr = MEM_TEST_GPA + 4096 / 2;
+ gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096)
+ *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
+
+ host_perform_sync(sync);
+
+ for (gptr = MEM_TEST_GPA;
+ gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) {
+ uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
+ uint64_t val = *vptr;
+
+ TEST_ASSERT(val == MEM_TEST_VAL_1,
+ "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
+ val, gptr);
+ *vptr = 0;
+ }
+
+ host_perform_sync(sync);
+}
+
+struct test_data {
+ const char *name;
+ uint64_t mem_size;
+ void (*guest_code)(void);
+ bool (*prepare)(struct vm_data *data, struct sync_area *sync,
+ uint64_t *maxslots);
+ void (*loop)(struct vm_data *data, struct sync_area *sync);
+};
+
+static bool test_execute(int nslots, uint64_t *maxslots,
+ unsigned int maxtime,
+ const struct test_data *tdata,
+ uint64_t *nloops,
+ struct timespec *slot_runtime,
+ struct timespec *guest_runtime)
+{
+ uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES;
+ struct vm_data *data;
+ struct sync_area *sync;
+ struct timespec tstart;
+ bool ret = true;
+
+ data = alloc_vm();
+ if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
+ mem_size, slot_runtime)) {
+ ret = false;
+ goto exit_free;
+ }
+
+ sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+
+ if (tdata->prepare &&
+ !tdata->prepare(data, sync, maxslots)) {
+ ret = false;
+ goto exit_free;
+ }
+
+ launch_vm(data);
+
+ clock_gettime(CLOCK_MONOTONIC, &tstart);
+ let_guest_run(sync);
+
+ while (1) {
+ *guest_runtime = timespec_elapsed(tstart);
+ if (guest_runtime->tv_sec >= maxtime)
+ break;
+
+ tdata->loop(data, sync);
+
+ (*nloops)++;
+ }
+
+ make_guest_exit(sync);
+ wait_guest_exit(data);
+
+exit_free:
+ free_vm(data);
+
+ return ret;
+}
+
+static const struct test_data tests[] = {
+ {
+ .name = "map",
+ .mem_size = MEM_SIZE_MAP_PAGES,
+ .guest_code = guest_code_test_memslot_map,
+ .loop = test_memslot_map_loop,
+ },
+ {
+ .name = "unmap",
+ .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+ .guest_code = guest_code_test_memslot_unmap,
+ .loop = test_memslot_unmap_loop,
+ },
+ {
+ .name = "unmap chunked",
+ .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+ .guest_code = guest_code_test_memslot_unmap,
+ .loop = test_memslot_unmap_loop_chunked,
+ },
+ {
+ .name = "move active area",
+ .guest_code = guest_code_test_memslot_move,
+ .prepare = test_memslot_move_prepare_active,
+ .loop = test_memslot_move_loop,
+ },
+ {
+ .name = "move inactive area",
+ .guest_code = guest_code_test_memslot_move,
+ .prepare = test_memslot_move_prepare_inactive,
+ .loop = test_memslot_move_loop,
+ },
+ {
+ .name = "RW",
+ .guest_code = guest_code_test_memslot_rw,
+ .loop = test_memslot_rw_loop
+ },
+};
+
+#define NTESTS ARRAY_SIZE(tests)
+
+struct test_args {
+ int tfirst;
+ int tlast;
+ int nslots;
+ int seconds;
+ int runs;
+};
+
+static void help(char *name, struct test_args *targs)
+{
+ int ctr;
+
+ pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n",
+ name);
+ pr_info(" -h: print this help screen.\n");
+ pr_info(" -v: enable verbose mode (not for benchmarking).\n");
+ pr_info(" -d: enable extra debug checks.\n");
+ pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
+ targs->nslots);
+ pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
+ targs->tfirst, NTESTS - 1);
+ pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
+ targs->tlast, NTESTS - 1);
+ pr_info(" -l: specify the test length in seconds (currently: %i)\n",
+ targs->seconds);
+ pr_info(" -r: specify the number of runs per test (currently: %i)\n",
+ targs->runs);
+
+ pr_info("\nAvailable tests:\n");
+ for (ctr = 0; ctr < NTESTS; ctr++)
+ pr_info("%d: %s\n", ctr, tests[ctr].name);
+}
+
+static bool parse_args(int argc, char *argv[],
+ struct test_args *targs)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
+ switch (opt) {
+ case 'h':
+ default:
+ help(argv[0], targs);
+ return false;
+ case 'v':
+ verbose = true;
+ break;
+ case 'd':
+ map_unmap_verify = true;
+ break;
+ case 's':
+ targs->nslots = atoi(optarg);
+ if (targs->nslots <= 0 && targs->nslots != -1) {
+ pr_info("Slot count cap has to be positive or -1 for no cap\n");
+ return false;
+ }
+ break;
+ case 'f':
+ targs->tfirst = atoi(optarg);
+ if (targs->tfirst < 0) {
+ pr_info("First test to run has to be non-negative\n");
+ return false;
+ }
+ break;
+ case 'e':
+ targs->tlast = atoi(optarg);
+ if (targs->tlast < 0 || targs->tlast >= NTESTS) {
+ pr_info("Last test to run has to be non-negative and less than %zu\n",
+ NTESTS);
+ return false;
+ }
+ break;
+ case 'l':
+ targs->seconds = atoi(optarg);
+ if (targs->seconds < 0) {
+ pr_info("Test length in seconds has to be non-negative\n");
+ return false;
+ }
+ break;
+ case 'r':
+ targs->runs = atoi(optarg);
+ if (targs->runs <= 0) {
+ pr_info("Runs per test has to be positive\n");
+ return false;
+ }
+ break;
+ }
+ }
+
+ if (optind < argc) {
+ help(argv[0], targs);
+ return false;
+ }
+
+ if (targs->tfirst > targs->tlast) {
+ pr_info("First test to run cannot be greater than the last test to run\n");
+ return false;
+ }
+
+ return true;
+}
+
+struct test_result {
+ struct timespec slot_runtime, guest_runtime, iter_runtime;
+ int64_t slottimens, runtimens;
+ uint64_t nloops;
+};
+
+static bool test_loop(const struct test_data *data,
+ const struct test_args *targs,
+ struct test_result *rbestslottime,
+ struct test_result *rbestruntime)
+{
+ uint64_t maxslots;
+ struct test_result result;
+
+ result.nloops = 0;
+ if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
+ &result.nloops,
+ &result.slot_runtime, &result.guest_runtime)) {
+ if (maxslots)
+ pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n",
+ maxslots);
+ else
+ pr_info("Memslot count may be too high for this test, try adjusting the cap\n");
+
+ return false;
+ }
+
+ pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n",
+ result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec,
+ result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec);
+ if (!result.nloops) {
+ pr_info("No full loops done - too short test time or system too loaded?\n");
+ return true;
+ }
+
+ result.iter_runtime = timespec_div(result.guest_runtime,
+ result.nloops);
+ pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n",
+ result.nloops,
+ result.iter_runtime.tv_sec,
+ result.iter_runtime.tv_nsec);
+ result.slottimens = timespec_to_ns(result.slot_runtime);
+ result.runtimens = timespec_to_ns(result.iter_runtime);
+
+ /*
+ * Only rank the slot setup time for tests using the whole test memory
+ * area so they are comparable
+ */
+ if (!data->mem_size &&
+ (!rbestslottime->slottimens ||
+ result.slottimens < rbestslottime->slottimens))
+ *rbestslottime = result;
+ if (!rbestruntime->runtimens ||
+ result.runtimens < rbestruntime->runtimens)
+ *rbestruntime = result;
+
+ return true;
+}
+
+int main(int argc, char *argv[])
+{
+ struct test_args targs = {
+ .tfirst = 0,
+ .tlast = NTESTS - 1,
+ .nslots = -1,
+ .seconds = 5,
+ .runs = 1,
+ };
+ struct test_result rbestslottime;
+ int tctr;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ if (!parse_args(argc, argv, &targs))
+ return -1;
+
+ rbestslottime.slottimens = 0;
+ for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
+ const struct test_data *data = &tests[tctr];
+ unsigned int runctr;
+ struct test_result rbestruntime;
+
+ if (tctr > targs.tfirst)
+ pr_info("\n");
+
+ pr_info("Testing %s performance with %i runs, %d seconds each\n",
+ data->name, targs.runs, targs.seconds);
+
+ rbestruntime.runtimens = 0;
+ for (runctr = 0; runctr < targs.runs; runctr++)
+ if (!test_loop(data, &targs,
+ &rbestslottime, &rbestruntime))
+ break;
+
+ if (rbestruntime.runtimens)
+ pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n",
+ rbestruntime.iter_runtime.tv_sec,
+ rbestruntime.iter_runtime.tv_nsec,
+ rbestruntime.nloops);
+ }
+
+ if (rbestslottime.slottimens)
+ pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
+ rbestslottime.slot_runtime.tv_sec,
+ rbestslottime.slot_runtime.tv_nsec);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 978f5b5f4dc0..d8812f27648c 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -376,7 +376,7 @@ static void test_add_max_memory_regions(void)
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
- mem = mmap(NULL, MEM_REGION_SIZE * max_mem_slots + alignment,
+ mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
@@ -401,7 +401,7 @@ static void test_add_max_memory_regions(void)
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Adding one more memory slot should fail with EINVAL");
- munmap(mem, MEM_REGION_SIZE * max_mem_slots + alignment);
+ munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
munmap(mem_extra, MEM_REGION_SIZE);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
index 9b78e8889638..8c77537af5a1 100644
--- a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
@@ -19,7 +19,12 @@ struct {
u32 function;
u32 index;
} mangled_cpuids[] = {
+ /*
+ * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
+ * which are not controlled for by this test.
+ */
{.function = 0xd, .index = 0},
+ {.function = 0xd, .index = 1},
};
static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
diff --git a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
index cb953df4d7d0..8aed0db1331d 100644
--- a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
+++ b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
@@ -37,9 +37,7 @@ static void test_get_msr_index(void)
int old_res, res, kvm_fd, r;
struct kvm_msr_list *list;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
old_res = kvm_num_index_msrs(kvm_fd, 0);
TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
@@ -101,9 +99,7 @@ static void test_get_msr_feature(void)
int res, old_res, i, kvm_fd;
struct kvm_msr_list *feature_list;
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
+ kvm_fd = open_kvm_dev_path_or_exit();
old_res = kvm_num_feature_msrs(kvm_fd, 0);
TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
diff --git a/tools/testing/selftests/nci/.gitignore b/tools/testing/selftests/nci/.gitignore
new file mode 100644
index 000000000000..448eeb4590fc
--- /dev/null
+++ b/tools/testing/selftests/nci/.gitignore
@@ -0,0 +1 @@
+/nci_dev
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 76d9487fb03c..5abe92d55b69 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
ipv4_rt_replace_mpath
}
+# checks that cached input route on VRF port is deleted
+# when VRF is deleted
+ipv4_local_rt_cache()
+{
+ run_cmd "ip addr add 10.0.0.1/32 dev lo"
+ run_cmd "ip netns add test-ns"
+ run_cmd "ip link add veth-outside type veth peer name veth-inside"
+ run_cmd "ip link add vrf-100 type vrf table 1100"
+ run_cmd "ip link set veth-outside master vrf-100"
+ run_cmd "ip link set veth-inside netns test-ns"
+ run_cmd "ip link set veth-outside up"
+ run_cmd "ip link set vrf-100 up"
+ run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
+ run_cmd "ip netns exec test-ns ip link set veth-inside up"
+ run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
+ run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
+ run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
+ run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
+ run_cmd "ip link delete vrf-100"
+
+ # if we do not hang, the test is a success
+ log_test $? 0 "Cached route removed from VRF port device"
+}
+
ipv4_route_test()
{
route_setup
ipv4_rt_add
ipv4_rt_replace
+ ipv4_local_rt_cache
route_cleanup
}
diff --git a/tools/testing/selftests/net/icmp.sh b/tools/testing/selftests/net/icmp.sh
new file mode 100755
index 000000000000..e4b04cd1644a
--- /dev/null
+++ b/tools/testing/selftests/net/icmp.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for checking ICMP response with dummy address instead of 0.0.0.0.
+# Sets up two namespaces like:
+# +----------------------+ +--------------------+
+# | ns1 | v4-via-v6 routes: | ns2 |
+# | | ' | |
+# | +--------+ -> 172.16.1.0/24 -> +--------+ |
+# | | veth0 +--------------------------+ veth0 | |
+# | +--------+ <- 172.16.0.0/24 <- +--------+ |
+# | 172.16.0.1 | | 2001:db8:1::2/64 |
+# | 2001:db8:1::2/64 | | |
+# +----------------------+ +--------------------+
+#
+# And then tries to ping 172.16.1.1 from ns1. This results in a "net
+# unreachable" message being sent from ns2, but there is no IPv4 address set in
+# that address space, so the kernel should substitute the dummy address
+# 192.0.0.8 defined in RFC7600.
+
+NS1=ns1
+NS2=ns2
+H1_IP=172.16.0.1/32
+H1_IP6=2001:db8:1::1
+RT1=172.16.1.0/24
+PINGADDR=172.16.1.1
+RT2=172.16.0.0/24
+H2_IP6=2001:db8:1::2
+
+TMPFILE=$(mktemp)
+
+cleanup()
+{
+ rm -f "$TMPFILE"
+ ip netns del $NS1
+ ip netns del $NS2
+}
+
+trap cleanup EXIT
+
+# Namespaces
+ip netns add $NS1
+ip netns add $NS2
+
+# Connectivity
+ip -netns $NS1 link add veth0 type veth peer name veth0 netns $NS2
+ip -netns $NS1 link set dev veth0 up
+ip -netns $NS2 link set dev veth0 up
+ip -netns $NS1 addr add $H1_IP dev veth0
+ip -netns $NS1 addr add $H1_IP6/64 dev veth0 nodad
+ip -netns $NS2 addr add $H2_IP6/64 dev veth0 nodad
+ip -netns $NS1 route add $RT1 via inet6 $H2_IP6
+ip -netns $NS2 route add $RT2 via inet6 $H1_IP6
+
+# Make sure ns2 will respond with ICMP unreachable
+ip netns exec $NS2 sysctl -qw net.ipv4.icmp_ratelimit=0 net.ipv4.ip_forward=1
+
+# Run the test - a ping runs in the background, and we capture ICMP responses
+# with tcpdump; -c 1 means it should exit on the first ping, but add a timeout
+# in case something goes wrong
+ip netns exec $NS1 ping -w 3 -i 0.5 $PINGADDR >/dev/null &
+ip netns exec $NS1 timeout 10 tcpdump -tpni veth0 -c 1 'icmp and icmp[icmptype] != icmp-echo' > $TMPFILE 2>/dev/null
+
+# Parse response and check for dummy address
+# tcpdump output looks like:
+# IP 192.0.0.8 > 172.16.0.1: ICMP net 172.16.1.1 unreachable, length 92
+RESP_IP=$(awk '{print $2}' < $TMPFILE)
+if [[ "$RESP_IP" != "192.0.0.8" ]]; then
+ echo "FAIL - got ICMP response from $RESP_IP, should be 192.0.0.8"
+ exit 1
+else
+ echo "OK"
+ exit 0
+fi
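The pass/fail check above assumes tcpdump prints the ICMP source address as the second whitespace-separated field. A quick sanity sketch of that parsing step, reusing the sample line quoted in the comment (the awk behaviour is standard; nothing here comes from the patch beyond the sample output):

# feed the documented tcpdump line through the same awk expression
echo 'IP 192.0.0.8 > 172.16.0.1: ICMP net 172.16.1.1 unreachable, length 92' |
	awk '{print $2}'
# prints "192.0.0.8", the value the test compares against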
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 3c4cb72ed8a4..2b495dc8d78e 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -197,9 +197,6 @@ ip -net "$ns4" link set ns4eth3 up
ip -net "$ns4" route add default via 10.0.3.2
ip -net "$ns4" route add default via dead:beef:3::2
-# use TCP syn cookies, even if no flooding was detected.
-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
-
set_ethtool_flags() {
local ns="$1"
local dev="$2"
@@ -501,6 +498,7 @@ do_transfer()
local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
expect_synrx=$((stat_synrx_last_l))
expect_ackrx=$((stat_ackrx_last_l))
@@ -518,10 +516,14 @@ do_transfer()
"${stat_synrx_now_l}" "${expect_synrx}" 1>&2
retc=1
fi
- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
- printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
- "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
- rets=1
+ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
+ if [ ${stat_ooo_now} -eq 0 ]; then
+ printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
+ "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+ rets=1
+ else
+ printf "[ Note ] fallback due to TCP OoO"
+ fi
fi
if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
@@ -732,6 +734,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
exit $ret
fi
+ # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
+ # mptcp syncookie support.
+ if [ $sender = $ns1 ]; then
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+ else
+ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
+ fi
+
run_tests "$ns2" $sender 10.0.1.2
run_tests "$ns2" $sender dead:beef:1::2
run_tests "$ns2" $sender 10.0.2.1
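For context on the sysctl toggled above (standard kernel semantics, not something this patch defines): net.ipv4.tcp_syncookies=1 sends SYN cookies only when the listen backlog overflows, while =2 sends them for every SYN, which is what exercises the MPTCP syncookie path on the reorder-free ns1<->ns2 link. Outside the harness, the cookie counters that do_transfer() reads through get_mib_counter() can be inspected directly; a hedged sketch (the counter names are standard /proc/net/netstat fields):

# force unconditional SYN cookies in the listener namespace
ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
# ... run one of the transfers ...
# then check how many cookies were actually sent and validated
ip netns exec "$ns2" nstat -az TcpExtSyncookiesSent TcpExtSyncookiesRecv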
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index a8fa64136282..7f26591f236b 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
readonly BASE="ns-$(mktemp -u XXXXXX)"
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 2fedc0781ce8..11d7cdb898c0 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -18,7 +18,8 @@ ret=0
cleanup() {
local ns
- local -r jobs="$(jobs -p)"
+ local jobs
+ readonly jobs="$(jobs -p)"
[ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
rm -f $STATS
@@ -108,7 +109,7 @@ chk_gro() {
if [ ! -f ../bpf/xdp_dummy.o ]; then
echo "Missing xdp_dummy helper. Build bpf selftest first"
- exit -1
+ exit 1
fi
create_ns
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 3171069a6b46..cd6430b39982 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
+TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
new file mode 100755
index 000000000000..6caf6ac8c285
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# This tests the fib expression.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+timeout=4
+
+log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
+
+cleanup()
+{
+ ip netns del ${ns1}
+ ip netns del ${ns2}
+ ip netns del ${nsrouter}
+
+ [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+dmesg | grep -q ' nft_rpfilter: '
+if [ $? -eq 0 ]; then
+ dmesg -c | grep ' nft_rpfilter: '
+ echo "WARN: a previous test run has failed" 1>&2
+fi
+
+sysctl -q net.netfilter.nf_log_all_netns=1
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+load_ruleset() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority 0; policy accept;
+ fib saddr . iif oif missing counter log prefix "$netns nft_rpfilter: " drop
+ }
+}
+EOF
+}
+
+load_ruleset_count() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority 0; policy accept;
+ ip daddr 1.1.1.1 fib saddr . iif oif missing counter drop
+ ip6 daddr 1c3::c01d fib saddr . iif oif missing counter drop
+ }
+}
+EOF
+}
+
+check_drops() {
+ dmesg | grep -q ' nft_rpfilter: '
+ if [ $? -eq 0 ]; then
+ dmesg | grep ' nft_rpfilter: '
+ echo "FAIL: rpfilter did drop packets"
+ return 1
+ fi
+
+ return 0
+}
+
+check_fib_counter() {
+ local want=$1
+ local ns=$2
+ local address=$3
+
+ line=$(ip netns exec ${ns} nft list table inet filter | grep 'fib saddr . iif' | grep $address | grep "packets $want" )
+ ret=$?
+
+ if [ $ret -ne 0 ];then
+ echo "Netns $ns fib counter doesn't match expected packet count of $want for $address" 1>&2
+ ip netns exec ${ns} nft list table inet filter
+ return 1
+ fi
+
+ if [ $want -gt 0 ]; then
+ echo "PASS: fib expression did drop packets for $address"
+ fi
+
+ return 0
+}
+
+load_ruleset ${nsrouter}
+load_ruleset ${ns1}
+load_ruleset ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+test_ping() {
+ local daddr4=$1
+ local daddr6=$2
+
+ ip netns exec ${ns1} ping -c 1 -q $daddr4 > /dev/null
+ ret=$?
+ if [ $ret -ne 0 ];then
+ check_drops
+ echo "FAIL: ${ns1} cannot reach $daddr4, ret $ret" 1>&2
+ return 1
+ fi
+
+ ip netns exec ${ns1} ping -c 3 -q $daddr6 > /dev/null
+ ret=$?
+ if [ $ret -ne 0 ];then
+ check_drops
+ echo "FAIL: ${ns1} cannot reach $daddr6, ret $ret" 1>&2
+ return 1
+ fi
+
+ return 0
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+sleep 3
+
+test_ping 10.0.2.1 dead:2::1 || exit 1
+check_drops || exit 1
+
+test_ping 10.0.2.99 dead:2::99 || exit 1
+check_drops || exit 1
+
+echo "PASS: fib expression did not cause unwanted packet drops"
+
+ip netns exec ${nsrouter} nft flush table inet filter
+
+ip -net ${ns1} route del default
+ip -net ${ns1} -6 route del default
+
+ip -net ${ns1} addr del 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr del dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr add dead:2::99/64 dev eth0
+
+ip -net ${ns1} route add default via 10.0.2.1
+ip -net ${ns1} -6 route add default via dead:2::1
+
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth0
+
+# switch to a ruleset that doesn't log; this time
+# it's expected that this does drop the packets.
+load_ruleset_count ${nsrouter}
+
+# ns1 has a default route, but nsrouter does not.
+# must not check return value, ping to 1.1.1.1 will
+# fail.
+check_fib_counter 0 ${nsrouter} 1.1.1.1 || exit 1
+check_fib_counter 0 ${nsrouter} 1c3::c01d || exit 1
+
+ip netns exec ${ns1} ping -c 1 -W 1 -q 1.1.1.1 > /dev/null
+check_fib_counter 1 ${nsrouter} 1.1.1.1 || exit 1
+
+sleep 2
+ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
+check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+
+exit 0
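check_fib_counter() works by grepping the output of "nft list table inet filter" for the per-rule counter. After the one IPv4 ping and three IPv6 pings above hit the counting ruleset, the listing should look roughly like this (byte counts illustrative):

ip netns exec "${nsrouter}" nft list table inet filter
# table inet filter {
#	chain prerouting {
#		type filter hook prerouting priority 0; policy accept;
#		ip daddr 1.1.1.1 fib saddr . iif oif missing counter packets 1 bytes 84 drop
#		ip6 daddr 1c3::c01d fib saddr . iif oif missing counter packets 3 bytes 312 drop
#	}
# }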
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
index 78ddf5e11625..8e83cf91513a 100644
--- a/tools/testing/selftests/perf_events/sigtrap_threads.c
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -43,7 +43,7 @@ static struct {
siginfo_t first_siginfo; /* First observed siginfo_t. */
} ctx;
-/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */
+/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */
#define TEST_SIG_DATA(addr) (~(unsigned long)(addr))
static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
@@ -164,8 +164,8 @@ TEST_F(sigtrap_threads, enable_event)
EXPECT_EQ(ctx.signal_count, NUM_THREADS);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
- EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
/* Check enabled for parent. */
ctx.iterate_on = 0;
@@ -183,8 +183,8 @@ TEST_F(sigtrap_threads, modify_and_enable_event)
EXPECT_EQ(ctx.signal_count, NUM_THREADS);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
- EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
/* Check enabled for parent. */
ctx.iterate_on = 0;
@@ -203,8 +203,8 @@ TEST_F(sigtrap_threads, signal_stress)
EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
- EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index bed4b5318a86..8f3e72e626fa 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,6 +10,7 @@
/proc-self-map-files-002
/proc-self-syscall
/proc-self-wchan
+/proc-subset-pid
/proc-uptime-001
/proc-uptime-002
/read
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 98c3b647f54d..e3d5c77a8612 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
# define SYSCALL_RET_SET(_regs, _val) \
do { \
typeof(_val) _result = (_val); \
- /* \
- * A syscall error is signaled by CR0 SO bit \
- * and the code is stored as a positive value. \
- */ \
- if (_result < 0) { \
- SYSCALL_RET(_regs) = -_result; \
- (_regs).ccr |= 0x10000000; \
- } else { \
+ if ((_regs.trap & 0xfff0) == 0x3000) { \
+ /* \
+ * scv 0 system call uses -ve result \
+ * for error, so no need to adjust. \
+ */ \
SYSCALL_RET(_regs) = _result; \
- (_regs).ccr &= ~0x10000000; \
+ } else { \
+ /* \
+ * A syscall error is signaled by the \
+ * CR0 SO bit and the code is stored as \
+ * a positive value. \
+ */ \
+ if (_result < 0) { \
+ SYSCALL_RET(_regs) = -_result; \
+ (_regs).ccr |= 0x10000000; \
+ } else { \
+ SYSCALL_RET(_regs) = _result; \
+ (_regs).ccr &= ~0x10000000; \
+ } \
} \
} while (0)
# define SYSCALL_RET_SET_ON_PTRACE_EXIT
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
index 1cda2e11b3ad..773c5027553d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
@@ -9,11 +9,11 @@
"setup": [
"$IP link add dev $DUMMY type dummy || /bin/true"
],
- "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
- "expExitCode": "2",
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536",
+ "expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc",
- "matchCount": "0",
+ "matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536",
+ "matchCount": "1",
"teardown": [
"$IP link del dev $DUMMY"
]
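The updated tdc case now expects "flows 65536" to be accepted (the old version of the test treated it as invalid and looked for exit code 2). The same check can be reproduced by hand on a scratch dummy device; a rough sketch (device name is illustrative):

ip link add dev dummy0 type dummy
tc qdisc add dev dummy0 handle 1: root fq_pie flows 65536
tc qdisc show dev dummy0
# expect a line along the lines of:
#   qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536 ...
ip link del dev dummy0    # removes the qdisc along with the device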
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 7ed7cd95e58f..ebc4ee0fe179 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
ip1 -4 route add default dev wg0 table 51820
ip1 -4 rule add not fwmark 51820 table 51820
ip1 -4 rule add table main suppress_prefixlength 0
+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
# Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
n1 ping -W 1 -c 100 -f 192.168.99.7
n1 ping -W 1 -c 100 -f abab::1111
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
index 4eecb432a66c..74db83a0aedd 100644
--- a/tools/testing/selftests/wireguard/qemu/kernel.config
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config
@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
CONFIG_NETFILTER_XT_NAT=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_NAT_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6b4feb92dc79..46fb042837d2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -307,6 +307,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
+EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
@@ -2054,6 +2055,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
return true;
}
+static int kvm_try_get_pfn(kvm_pfn_t pfn)
+{
+ if (kvm_is_reserved_pfn(pfn))
+ return 1;
+ return get_page_unless_zero(pfn_to_page(pfn));
+}
+
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool *async,
bool write_fault, bool *writable,
@@ -2103,13 +2111,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
* Whoever called remap_pfn_range is also going to call e.g.
* unmap_mapping_range before the underlying pages are freed,
* causing a call to our MMU notifier.
+ *
+ * Certain IO or PFNMAP mappings can be backed with valid
+ * struct pages, but be allocated without refcounting e.g.,
+ * tail pages of non-compound higher order allocations, which
+ * would then underflow the refcount when the caller does the
+ * required put_page. Don't allow those pages here.
*/
- kvm_get_pfn(pfn);
+ if (!kvm_try_get_pfn(pfn))
+ r = -EFAULT;
out:
pte_unmap_unlock(ptep, ptl);
*p_pfn = pfn;
- return 0;
+
+ return r;
}
/*
@@ -2929,6 +2945,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
goto out;
if (signal_pending(current))
goto out;
+ if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
+ goto out;
ret = 0;
out:
@@ -2973,8 +2991,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
goto out;
}
poll_end = cur = ktime_get();
- } while (single_task_running() && !need_resched() &&
- ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
}
prepare_to_rcuwait(&vcpu->wait);
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index c9bb3957f58a..28fda42e471b 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -40,21 +40,17 @@ static int __connect(struct irq_bypass_producer *prod,
if (prod->add_consumer)
ret = prod->add_consumer(prod, cons);
- if (ret)
- goto err_add_consumer;
-
- ret = cons->add_producer(cons, prod);
- if (ret)
- goto err_add_producer;
+ if (!ret) {
+ ret = cons->add_producer(cons, prod);
+ if (ret && prod->del_consumer)
+ prod->del_consumer(prod, cons);
+ }
if (cons->start)
cons->start(cons);
if (prod->start)
prod->start(prod);
-err_add_producer:
- if (prod->del_consumer)
- prod->del_consumer(prod, cons);
-err_add_consumer:
+
return ret;
}