-rw-r--r--.mailmap4
-rw-r--r--Documentation/ABI/testing/sysfs-devices-deferred_probe12
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt13
-rw-r--r--Documentation/block/queue-sysfs.txt6
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c.txt8
-rw-r--r--Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt3
-rw-r--r--Documentation/devicetree/bindings/mtd/tango-nand.txt6
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-net.txt2
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt5
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83867.txt6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,armada-98dx3236-pinctrl.txt46
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt20
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt131
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ti,iodelay.txt47
-rw-r--r--Documentation/devicetree/bindings/power/supply/tps65217_charger.txt7
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt19
-rw-r--r--Documentation/driver-api/infrastructure.rst15
-rw-r--r--Documentation/filesystems/proc.txt5
-rw-r--r--Documentation/gpio/driver.txt9
-rw-r--r--Documentation/networking/mpls-sysctl.txt4
-rw-r--r--Documentation/pinctrl.txt4
-rw-r--r--Documentation/power/states.txt4
-rw-r--r--Documentation/unaligned-memory-access.txt2
-rw-r--r--Documentation/vfio-mediated-device.txt27
-rw-r--r--Documentation/vm/page_frags42
-rw-r--r--MAINTAINERS76
-rw-r--r--Makefile4
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/include/asm/cache.h9
-rw-r--r--arch/arc/include/asm/delay.h4
-rw-r--r--arch/arc/include/asm/entry-arcv2.h2
-rw-r--r--arch/arc/include/asm/module.h4
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/setup.h1
-rw-r--r--arch/arc/kernel/head.S14
-rw-r--r--arch/arc/kernel/intc-arcv2.c6
-rw-r--r--arch/arc/kernel/intc-compact.c4
-rw-r--r--arch/arc/kernel/mcip.c59
-rw-r--r--arch/arc/kernel/module.c4
-rw-r--r--arch/arc/kernel/smp.c25
-rw-r--r--arch/arc/kernel/unaligned.c3
-rw-r--r--arch/arc/mm/cache.c155
-rw-r--r--arch/arc/mm/init.c5
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi8
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts1
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi1
-rw-r--r--arch/arm/boot/dts/am4372.dtsi1
-rw-r--r--arch/arm/boot/dts/am571x-idk.dts10
-rw-r--r--arch/arm/boot/dts/am572x-idk.dts14
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi9
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/da850-evm.dts1
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi1
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi1
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/dra72-evm-revc.dts2
-rw-r--r--arch/arm/boot/dts/dra72-evm-tps65917.dtsi16
-rw-r--r--arch/arm/boot/dts/imx31.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi1
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts11
-rw-r--r--arch/arm/boot/dts/omap2.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi1
-rw-r--r--arch/arm/boot/dts/omap4.dtsi1
-rw-r--r--arch/arm/boot/dts/omap5.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom-mdm9615.dtsi2
-rw-r--r--arch/arm/boot/dts/sun6i-a31-hummingbird.dts4
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts2
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-b.dts3
-rw-r--r--arch/arm/configs/multi_v7_defconfig4
-rw-r--r--arch/arm/configs/s3c2410_defconfig6
-rw-r--r--arch/arm/include/asm/cputype.h3
-rw-r--r--arch/arm/include/asm/ftrace.h18
-rw-r--r--arch/arm/include/asm/virt.h5
-rw-r--r--arch/arm/include/uapi/asm/types.h (renamed from arch/arm/include/asm/types.h)6
-rw-r--r--arch/arm/kernel/hw_breakpoint.c16
-rw-r--r--arch/arm/kernel/smp_tlb.c7
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/mach-davinci/clock.c12
-rw-r--r--arch/arm/mach-davinci/clock.h2
-rw-r--r--arch/arm/mach-davinci/da850.c32
-rw-r--r--arch/arm/mach-davinci/usb-da8xx.c34
-rw-r--r--arch/arm/mach-exynos/platsmp.c31
-rw-r--r--arch/arm/mach-exynos/suspend.c64
-rw-r--r--arch/arm/mach-imx/mach-imx1.c1
-rw-r--r--arch/arm/mach-omap1/dma.c16
-rw-r--r--arch/arm/mach-omap2/Makefile2
-rw-r--r--arch/arm/mach-omap2/board-generic.c2
-rw-r--r--arch/arm/mach-omap2/gpio.c160
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h4
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c2
-rw-r--r--arch/arm/mach-omap2/prm_common.c4
-rw-r--r--arch/arm/mach-omap2/timer.c9
-rw-r--r--arch/arm/mach-s3c24xx/common.c76
-rw-r--r--arch/arm/mach-s5pv210/pm.c7
-rw-r--r--arch/arm/mach-s5pv210/regs-clock.h4
-rw-r--r--arch/arm/mach-ux500/pm.c4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts16
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts2
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi10
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi6
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/asm-uaccess.h65
-rw-r--r--arch/arm64/include/asm/assembler.h36
-rw-r--r--arch/arm64/include/asm/current.h10
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/include/asm/uaccess.h64
-rw-r--r--arch/arm64/include/asm/virt.h9
-rw-r--r--arch/arm64/include/uapi/asm/ptrace.h1
-rw-r--r--arch/arm64/kernel/entry.S4
-rw-r--r--arch/arm64/kernel/ptrace.c16
-rw-r--r--arch/arm64/kernel/topology.c8
-rw-r--r--arch/arm64/kernel/traps.c28
-rw-r--r--arch/arm64/lib/clear_user.S2
-rw-r--r--arch/arm64/lib/copy_from_user.S2
-rw-r--r--arch/arm64/lib/copy_in_user.S2
-rw-r--r--arch/arm64/lib/copy_to_user.S2
-rw-r--r--arch/arm64/mm/cache.S2
-rw-r--r--arch/arm64/mm/dma-mapping.c3
-rw-r--r--arch/arm64/mm/fault.c8
-rw-r--r--arch/arm64/mm/hugetlbpage.c2
-rw-r--r--arch/arm64/mm/init.c5
-rw-r--r--arch/arm64/xen/hypercall.S2
-rw-r--r--arch/frv/include/asm/atomic.h35
-rw-r--r--arch/mips/kvm/entry.c5
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mn10300/include/asm/switch_to.h2
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S2
-rw-r--r--arch/parisc/include/asm/bitops.h8
-rw-r--r--arch/parisc/include/asm/thread_info.h1
-rw-r--r--arch/parisc/include/uapi/asm/bitsperlong.h2
-rw-r--r--arch/parisc/include/uapi/asm/swab.h5
-rw-r--r--arch/parisc/kernel/time.c23
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h4
-rw-r--r--arch/powerpc/include/asm/hugetlb.h14
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h5
-rw-r--r--arch/powerpc/include/asm/page.h3
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h1
-rw-r--r--arch/powerpc/include/asm/pgtable-be-types.h8
-rw-r--r--arch/powerpc/include/asm/pgtable-types.h7
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h10
-rw-r--r--arch/powerpc/kernel/eeh.c10
-rw-r--r--arch/powerpc/kernel/ptrace.c14
-rw-r--r--arch/powerpc/mm/hash_utils_64.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage.c31
-rw-r--r--arch/powerpc/mm/init-common.c13
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c18
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/power9-events-list.h2
-rw-r--r--arch/powerpc/perf/power9-pmu.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c31
-rw-r--r--arch/s390/configs/default_defconfig27
-rw-r--r--arch/s390/configs/gcov_defconfig50
-rw-r--r--arch/s390/configs/performance_defconfig33
-rw-r--r--arch/s390/defconfig5
-rw-r--r--arch/s390/include/asm/asm-prototypes.h8
-rw-r--r--arch/s390/include/asm/ctl_reg.h4
-rw-r--r--arch/s390/kernel/ptrace.c8
-rw-r--r--arch/s390/kernel/vtime.c8
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/mm/pgtable.c7
-rw-r--r--arch/tile/kernel/ptrace.c2
-rw-r--r--arch/x86/boot/string.c1
-rw-r--r--arch/x86/boot/string.h9
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c3
-rw-r--r--arch/x86/entry/entry_32.S30
-rw-r--r--arch/x86/entry/entry_64.S11
-rw-r--r--arch/x86/events/amd/ibs.c2
-rw-r--r--arch/x86/events/core.c4
-rw-r--r--arch/x86/events/intel/core.c9
-rw-r--r--arch/x86/events/intel/cstate.c2
-rw-r--r--arch/x86/events/intel/ds.c6
-rw-r--r--arch/x86/events/intel/rapl.c1
-rw-r--r--arch/x86/events/intel/uncore.c1
-rw-r--r--arch/x86/events/intel/uncore_snbep.c2
-rw-r--r--arch/x86/include/asm/bitops.h13
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/microcode_intel.h15
-rw-r--r--arch/x86/include/asm/processor.h18
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/switch_to.h10
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c9
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/intel.c11
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c3
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c70
-rw-r--r--arch/x86/kernel/pci-swiotlb.c6
-rw-r--r--arch/x86/kernel/tsc.c1
-rw-r--r--arch/x86/kernel/unwind_frame.c30
-rw-r--r--arch/x86/kvm/emulate.c70
-rw-r--r--arch/x86/kvm/lapic.c6
-rw-r--r--arch/x86/kvm/lapic.h1
-rw-r--r--arch/x86/kvm/vmx.c14
-rw-r--r--arch/x86/kvm/x86.c15
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/pci/acpi.c10
-rw-r--r--arch/x86/platform/efi/efi.c66
-rw-r--r--arch/x86/platform/efi/quirks.c4
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_spidev.c)4
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c2
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--block/blk-lib.c13
-rw-r--r--block/blk-mq.c1
-rw-r--r--block/blk-wbt.c13
-rw-r--r--block/blk-zoned.c4
-rw-r--r--block/partition-generic.c14
-rw-r--r--crypto/testmgr.c30
-rw-r--r--drivers/acpi/acpi_watchdog.c2
-rw-r--r--drivers/acpi/acpica/tbdata.c9
-rw-r--r--drivers/acpi/acpica/tbinstal.c17
-rw-r--r--drivers/acpi/glue.c11
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/acpi/sysfs.c56
-rw-r--r--drivers/acpi/video_detect.c11
-rw-r--r--drivers/auxdisplay/Kconfig6
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/core.c7
-rw-r--r--drivers/base/dd.c13
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/power/domain.c1
-rw-r--r--drivers/block/nbd.c12
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkfront.c22
-rw-r--r--drivers/block/zram/zram_drv.c19
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/ppdev.c13
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/clk-stm32f4.c4
-rw-r--r--drivers/clk/renesas/clk-mstp.c27
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c14
-rw-r--r--drivers/clocksource/exynos_mct.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c67
-rw-r--r--drivers/crypto/marvell/cesa.h3
-rw-r--r--drivers/crypto/marvell/hash.c34
-rw-r--r--drivers/crypto/marvell/tdma.c9
-rw-r--r--drivers/devfreq/devfreq.c15
-rw-r--r--drivers/devfreq/exynos-bus.c2
-rw-r--r--drivers/dma/dw/Kconfig2
-rw-r--r--drivers/dma/ioat/hw.h2
-rw-r--r--drivers/dma/ioat/init.c15
-rw-r--r--drivers/dma/omap-dma.c61
-rw-r--r--drivers/dma/pl330.c11
-rw-r--r--drivers/dma/sh/rcar-dmac.c8
-rw-r--r--drivers/dma/stm32-dma.c17
-rw-r--r--drivers/dma/ti-dma-crossbar.c2
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firmware/arm_scpi.c10
-rw-r--r--drivers/firmware/efi/fake_mem.c3
-rw-r--r--drivers/firmware/efi/libstub/efistub.h8
-rw-r--r--drivers/firmware/efi/libstub/fdt.c87
-rw-r--r--drivers/firmware/efi/memmap.c38
-rw-r--r--drivers/firmware/psci_checker.c4
-rw-r--r--drivers/gpio/gpio-aspeed.c188
-rw-r--r--drivers/gpio/gpio-bcm-kona.c14
-rw-r--r--drivers/gpio/gpio-dln2.c12
-rw-r--r--drivers/gpio/gpio-dwapb.c14
-rw-r--r--drivers/gpio/gpio-ep93xx.c11
-rw-r--r--drivers/gpio/gpio-f7188x.c19
-rw-r--r--drivers/gpio/gpio-lp873x.c14
-rw-r--r--drivers/gpio/gpio-max77620.c20
-rw-r--r--drivers/gpio/gpio-menz127.c34
-rw-r--r--drivers/gpio/gpio-merrifield.c14
-rw-r--r--drivers/gpio/gpio-mxs.c2
-rw-r--r--drivers/gpio/gpio-omap.c14
-rw-r--r--drivers/gpio/gpio-tc3589x.c15
-rw-r--r--drivers/gpio/gpio-tegra.c14
-rw-r--r--drivers/gpio/gpio-tps65218.c14
-rw-r--r--drivers/gpio/gpio-vx855.c13
-rw-r--r--drivers/gpio/gpio-wcove.c13
-rw-r--r--drivers/gpio/gpio-wm831x.c21
-rw-r--r--drivers/gpio/gpio-wm8994.c13
-rw-r--r--drivers/gpio/gpiolib.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c10
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c1
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c157
-rw-r--r--drivers/gpu/drm/ast/ast_post.c18
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c7
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig9
-rw-r--r--drivers/gpu/drm/drm_atomic.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c36
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c78
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c66
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c120
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c103
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c86
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c84
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c162
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c67
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h19
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c9
-rw-r--r--drivers/gpu/drm/i915/intel_display.c81
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c41
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c9
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c8
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c3
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c19
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c6
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c18
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/si.c79
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c25
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c27
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c2
-rw-r--r--drivers/hid/hid-asus.c17
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-corsair.c60
-rw-r--r--drivers/hid/hid-cypress.c3
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/hid-sony.c36
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c9
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hid/wacom_sys.c16
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/i2c/busses/i2c-cadence.c8
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c20
-rw-r--r--drivers/i2c/busses/i2c-piix4.c22
-rw-r--r--drivers/i2c/i2c-core.c21
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c12
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c13
-rw-r--r--drivers/iio/counter/104-quad-8.c13
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c25
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c21
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h24
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c33
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c147
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c11
-rw-r--r--drivers/infiniband/hw/mlx4/main.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c12
-rw-r--r--drivers/infiniband/hw/qedr/main.c23
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h8
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c14
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c62
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c11
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c13
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c15
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/misc/adxl34x-i2c.c4
-rw-r--r--drivers/input/mouse/alps.h2
-rw-r--r--drivers/input/mouse/synaptics_i2c.c4
-rw-r--r--drivers/input/rmi4/Kconfig3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h6
-rw-r--r--drivers/input/touchscreen/elants_i2c.c4
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/dmar.c6
-rw-r--r--drivers/iommu/intel-iommu.c42
-rw-r--r--drivers/isdn/hardware/eicon/message.c3
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/raid0.c12
-rw-r--r--drivers/md/raid1.c275
-rw-r--r--drivers/md/raid10.c245
-rw-r--r--drivers/md/raid5-cache.c140
-rw-r--r--drivers/md/raid5.c128
-rw-r--r--drivers/md/raid5.h7
-rw-r--r--drivers/media/cec/cec-adap.c103
-rw-r--r--drivers/media/dvb-core/dvb_net.c15
-rw-r--r--drivers/media/i2c/Kconfig1
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c33
-rw-r--r--drivers/media/i2c/tvp5150.c56
-rw-r--r--drivers/media/i2c/tvp5150_reg.h9
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h2
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c133
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c3
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/client.c20
-rw-r--r--drivers/misc/mei/debugfs.c2
-rw-r--r--drivers/misc/mei/hbm.c4
-rw-r--r--drivers/misc/mei/hw.h6
-rw-r--r--drivers/misc/mei/mei_dev.h2
-rw-r--r--drivers/mmc/core/mmc_ops.c25
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c8
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-acpi.c3
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/tango_nand.c4
-rw-r--r--drivers/mtd/nand/xway_nand.c5
-rw-r--r--drivers/net/appletalk/ipddp.c2
-rw-r--r--drivers/net/can/c_can/c_can_pci.c1
-rw-r--r--drivers/net/can/ti_hecc.c16
-rw-r--r--drivers/net/dsa/bcm_sf2.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c15
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c11
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c80
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c27
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c11
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c34
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c6
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c88
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c133
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c8
-rw-r--r--drivers/net/ethernet/sfc/ef10.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/gtp.c13
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/atusb.c59
-rw-r--r--drivers/net/ipvlan/ipvlan.h5
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c60
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c7
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/bcm63xx.c21
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/phy/dp83867.c18
-rw-r--r--drivers/net/phy/marvell.c5
-rw-r--r--drivers/net/phy/micrel.c14
-rw-r--r--drivers/net/phy/phy.c24
-rw-r--r--drivers/net/phy/phy_led_triggers.c9
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c117
-rw-r--r--drivers/net/virtio_net.c25
-rw-r--r--drivers/net/vrf.c7
-rw-r--r--drivers/net/vxlan.c25
-rw-r--r--drivers/net/wan/slic_ds26522.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c44
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.h3
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/xenbus.c13
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c23
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvme/host/core.c24
-rw-r--r--drivers/nvme/host/fc.c30
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c32
-rw-r--r--drivers/nvme/host/rdma.c15
-rw-r--r--drivers/nvme/host/scsi.c27
-rw-r--r--drivers/nvme/target/admin-cmd.c4
-rw-r--r--drivers/nvme/target/configfs.c1
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fc.c36
-rw-r--r--drivers/nvme/target/fcloop.c4
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c17
-rw-r--r--drivers/nvmem/core.c4
-rw-r--r--drivers/nvmem/imx-ocotp.c2
-rw-r--r--drivers/nvmem/qfprom.c14
-rw-r--r--drivers/parport/parport_gsc.c8
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/host/pcie-designware.c10
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/pinctrl/Kconfig12
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c1115
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c1524
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c165
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h33
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c6
-rw-r--r--drivers/pinctrl/core.c401
-rw-r--r--drivers/pinctrl/core.h55
-rw-r--r--drivers/pinctrl/devicetree.c31
-rw-r--r--drivers/pinctrl/devicetree.h12
-rw-r--r--drivers/pinctrl/freescale/Kconfig3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c300
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h34
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c58
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c53
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h2
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c1
-rw-r--r--drivers/pinctrl/mediatek/Kconfig15
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c14
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c26
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c34
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-39x.c24
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c191
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c96
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c33
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c179
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.h65
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c8
-rw-r--r--drivers/pinctrl/pinconf.c12
-rw-r--r--drivers/pinctrl/pinconf.h9
-rw-r--r--drivers/pinctrl/pinctrl-amd.c70
-rw-r--r--drivers/pinctrl/pinctrl-amd.h8
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c3
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c2
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c2
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h2
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c10
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c2
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c299
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c55
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/pinmux.c216
-rw-r--r--drivers/pinctrl/pinmux.h56
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c48
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c317
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h31
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c12
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c125
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h43
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c4
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c16
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c1
-rw-r--r--drivers/pinctrl/sunxi/Kconfig14
-rw-r--r--drivers/pinctrl/sunxi/Makefile5
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-gr8.c536
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c403
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c (renamed from drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c)201
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c321
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c79
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h30
-rw-r--r--drivers/pinctrl/ti/Kconfig10
-rw-r--r--drivers/pinctrl/ti/Makefile1
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c937
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c2
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c42
-rw-r--r--drivers/platform/x86/ideapad-laptop.c1
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c2
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/surface3-wmi.c6
-rw-r--r--drivers/remoteproc/remoteproc_core.c29
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c29
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/fnic/fnic.h1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c16
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c40
-rw-r--r--drivers/scsi/qedi/Kconfig2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c92
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c24
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/sd.c29
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/snic/snic_main.c3
-rw-r--r--drivers/soc/samsung/exynos-pmu.c22
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c1
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/spi/spi-armada-3700.c11
-rw-r--r--drivers/spi/spi-axi-spi-engine.c3
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-dw.c5
-rw-r--r--drivers/spi/spi-pxa2xx.c1
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/staging/greybus/gpio.c15
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/target/target_core_transport.c24
-rw-r--r--drivers/target/target_core_xcopy.c157
-rw-r--r--drivers/target/target_core_xcopy.h7
-rw-r--r--drivers/thermal/rockchip_thermal.c153
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c12
-rw-r--r--drivers/tty/serial/8250/8250_port.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c22
-rw-r--r--drivers/tty/sysrq.c4
-rw-r--r--drivers/usb/core/config.c10
-rw-r--r--drivers/usb/core/hub.c59
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c20
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/params.c40
-rw-r--r--drivers/usb/dwc3/core.h10
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c6
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c17
-rw-r--r--drivers/usb/dwc3/ep0.c46
-rw-r--r--drivers/usb/dwc3/gadget.c24
-rw-r--r--drivers/usb/gadget/composite.c14
-rw-r--r--drivers/usb/gadget/function/f_fs.c26
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c18
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h1
-rw-r--r--drivers/usb/gadget/udc/core.c6
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c24
-rw-r--r--drivers/usb/host/xhci-mem.c46
-rw-r--r--drivers/usb/host/xhci-mtk.c4
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/host/xhci-ring.c262
-rw-r--r--drivers/usb/host/xhci.c17
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h7
-rw-r--r--drivers/usb/musb/musb_debugfs.c20
-rw-r--r--drivers/usb/musb/musb_dsps.c12
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musbhsdma.h2
-rw-r--r--drivers/usb/serial/ch341.c108
-rw-r--r--drivers/usb/serial/cp210x.c13
-rw-r--r--drivers/usb/serial/cyberjack.c10
-rw-r--r--drivers/usb/serial/f81534.c8
-rw-r--r--drivers/usb/serial/garmin_gps.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c5
-rw-r--r--drivers/usb/serial/io_ti.c22
-rw-r--r--drivers/usb/serial/iuu_phoenix.c11
-rw-r--r--drivers/usb/serial/keyspan_pda.c14
-rw-r--r--drivers/usb/serial/kl5kusb105.c9
-rw-r--r--drivers/usb/serial/kobil_sct.c12
-rw-r--r--drivers/usb/serial/mos7720.c56
-rw-r--r--drivers/usb/serial/mos7840.c24
-rw-r--r--drivers/usb/serial/omninet.c13
-rw-r--r--drivers/usb/serial/oti6858.c16
-rw-r--r--drivers/usb/serial/pl2303.c8
-rw-r--r--drivers/usb/serial/quatech2.c4
-rw-r--r--drivers/usb/serial/spcp8x5.c14
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/wusbcore/crypto.c3
-rw-r--r--drivers/vfio/mdev/mdev_core.c100
-rw-r--r--drivers/vfio/mdev/mdev_private.h29
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c8
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c12
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c5
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c4
-rw-r--r--drivers/vfio/vfio_iommu_type1.c102
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/vsock.c13
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c5
-rw-r--r--drivers/video/fbdev/core/fbcmap.c26
-rw-r--r--drivers/virtio/virtio_mmio.c20
-rw-r--r--drivers/virtio/virtio_ring.c7
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c2
-rw-r--r--drivers/xen/arm-device.c8
-rw-r--r--drivers/xen/events/events_fifo.c3
-rw-r--r--drivers/xen/evtchn.c4
-rw-r--r--drivers/xen/platform-pci.c71
-rw-r--r--drivers/xen/swiotlb-xen.c13
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c49
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/aio.c6
-rw-r--r--fs/binfmt_elf.c1
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/btrfs/async-thread.c15
-rw-r--r--fs/btrfs/extent-tree.c8
-rw-r--r--fs/btrfs/inode.c39
-rw-r--r--fs/btrfs/tree-log.c13
-rw-r--r--fs/btrfs/uuid-tree.c4
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/ceph/addr.c4
-rw-r--r--fs/ceph/caps.c7
-rw-r--r--fs/ceph/dir.c5
-rw-r--r--fs/ceph/inode.c3
-rw-r--r--fs/ceph/mds_client.c14
-rw-r--r--fs/coredump.c18
-rw-r--r--fs/crypto/keyinfo.c3
-rw-r--r--fs/crypto/policy.c5
-rw-r--r--fs/dax.c296
-rw-r--r--fs/dcache.c7
-rw-r--r--fs/direct-io.c3
-rw-r--r--fs/ext2/Kconfig1
-rw-r--r--fs/ext2/inode.c3
-rw-r--r--fs/ext4/Kconfig1
-rw-r--r--fs/ext4/file.c48
-rw-r--r--fs/f2fs/segment.c4
-rw-r--r--fs/f2fs/super.c6
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/libfs.c3
-rw-r--r--fs/namespace.c64
-rw-r--r--fs/nfs/nfs4proc.c33
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfsd/nfs4xdr.c4
-rw-r--r--fs/notify/mark.c12
-rw-r--r--fs/ocfs2/dlmglue.c10
-rw-r--r--fs/ocfs2/stackglue.c6
-rw-r--r--fs/ocfs2/stackglue.h3
-rw-r--r--fs/overlayfs/namei.c27
-rw-r--r--fs/posix_acl.c9
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/romfs/super.c23
-rw-r--r--fs/ubifs/Kconfig2
-rw-r--r--fs/ubifs/dir.c58
-rw-r--r--fs/ubifs/ioctl.c3
-rw-r--r--fs/ubifs/journal.c2
-rw-r--r--fs/ubifs/tnc.c25
-rw-r--r--fs/userfaultfd.c37
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c73
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c115
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h2
-rw-r--r--fs/xfs/libxfs/xfs_attr.c6
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c51
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h6
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c39
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h8
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c90
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c10
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c9
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c14
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_sb.c2
-rw-r--r--fs/xfs/xfs_aops.c19
-rw-r--r--fs/xfs/xfs_bmap_util.c28
-rw-r--r--fs/xfs/xfs_buf.c1
-rw-r--r--fs/xfs/xfs_dquot.c4
-rw-r--r--fs/xfs/xfs_fsops.c14
-rw-r--r--fs/xfs/xfs_icache.c3
-rw-r--r--fs/xfs/xfs_inode.c23
-rw-r--r--fs/xfs/xfs_iomap.c2
-rw-r--r--fs/xfs/xfs_iops.c50
-rw-r--r--fs/xfs/xfs_linux.h6
-rw-r--r--fs/xfs/xfs_log.c12
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_qm.c3
-rw-r--r--fs/xfs/xfs_refcount_item.c3
-rw-r--r--fs/xfs/xfs_sysfs.c4
-rw-r--r--include/asm-generic/asm-prototypes.h6
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_mode_config.h2
-rw-r--r--include/dt-bindings/mfd/tps65217.h26
-rw-r--r--include/kvm/arm_arch_timer.h1
-rw-r--r--include/linux/blkdev.h19
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/coredump.h1
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/dax.h3
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/filter.h7
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/genhd.h9
-rw-r--r--include/linux/gfp.h22
-rw-r--r--include/linux/gpio/driver.h107
-rw-r--r--include/linux/i2c.h1
-rw-r--r--include/linux/iio/common/st_sensors.h12
-rw-r--r--include/linux/jump_label_ratelimit.h5
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/mdev.h56
-rw-r--r--include/linux/memcontrol.h26
-rw-r--r--include/linux/memory_hotplug.h4
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/mlx5/device.h5
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h93
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mm_inline.h2
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/netdevice.h9
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nmi.h1
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/phy_led_triggers.h4
-rw-r--r--include/linux/pinctrl/consumer.h6
-rw-r--r--include/linux/pinctrl/pinconf-generic.h52
-rw-r--r--include/linux/pinctrl/pinctrl.h15
-rw-r--r--include/linux/radix-tree.h4
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slab.h4
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h10
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/tcp.h7
-rw-r--r--include/linux/timerfd.h20
-rw-r--r--include/linux/virtio_net.h6
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/lwtunnel.h13
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/netfilter/nft_fib.h6
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/tcp.h1
-rw-r--r--include/rdma/ib_verbs.h14
-rw-r--r--include/scsi/libfc.h6
-rw-r--r--include/soc/arc/mcip.h16
-rw-r--r--include/sound/hdmi-codec.h8
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/target/target_core_base.h4
-rw-r--r--include/trace/events/btrfs.h146
-rw-r--r--include/trace/events/mmflags.h3
-rw-r--r--include/trace/events/swiotlb.h17
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/cec-funcs.h10
-rw-r--r--include/uapi/linux/netfilter/nf_log.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/nl80211.h4
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--include/uapi/linux/timerfd.h36
-rw-r--r--include/uapi/linux/usb/functionfs.h1
-rw-r--r--include/uapi/rdma/Kbuild1
-rw-r--r--include/uapi/rdma/cxgb3-abi.h2
-rw-r--r--init/Kconfig4
-rw-r--r--ipc/sem.c2
-rw-r--r--kernel/audit_tree.c18
-rw-r--r--kernel/bpf/arraymap.c20
-rw-r--r--kernel/bpf/core.c14
-rw-r--r--kernel/bpf/hashtab.c24
-rw-r--r--kernel/bpf/stackmap.c20
-rw-r--r--kernel/bpf/syscall.c34
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/capability.c1
-rw-r--r--kernel/cpu.c33
-rw-r--r--kernel/events/core.c175
-rw-r--r--kernel/jump_label.c7
-rw-r--r--kernel/memremap.c4
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/panic.c4
-rw-r--r--kernel/pid_namespace.c10
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/rcu/rcu.h1
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tiny_plugin.h9
-rw-r--r--kernel/rcu/tree.c33
-rw-r--r--kernel/rcu/tree_exp.h52
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/update.c38
-rw-r--r--kernel/signal.c4
-rw-r--r--kernel/sysctl.c1
-rw-r--r--kernel/time/tick-sched.c9
-rw-r--r--kernel/time/tick-sched.h2
-rw-r--r--kernel/ucount.c14
-rw-r--r--kernel/watchdog.c9
-rw-r--r--kernel/watchdog_hld.c3
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/ioremap.c1
-rw-r--r--lib/iov_iter.c54
-rw-r--r--lib/radix-tree.c11
-rw-r--r--lib/swiotlb.c58
-rw-r--r--mm/filemap.c38
-rw-r--r--mm/huge_memory.c27
-rw-r--r--mm/hugetlb.c37
-rw-r--r--mm/khugepaged.c26
-rw-r--r--mm/memcontrol.c22
-rw-r--r--mm/memory.c88
-rw-r--r--mm/memory_hotplug.c28
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/page_alloc.c118
-rw-r--r--mm/slab.c8
-rw-r--r--mm/slub.c23
-rw-r--r--mm/swapfile.c20
-rw-r--r--mm/truncate.c75
-rw-r--r--mm/vmscan.c27
-rw-r--r--mm/workingset.c3
-rw-r--r--net/Kconfig4
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/ax25/ax25_subr.c2
-rw-r--r--net/batman-adv/fragmentation.c10
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netlink.c33
-rw-r--r--net/ceph/crypto.c2
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/drop_monitor.c39
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/flow_dissector.c9
-rw-r--r--net/core/lwt_bpf.c1
-rw-r--r--net/core/lwtunnel.c66
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock.c6
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dsa/dsa2.c11
-rw-r--r--net/dsa/slave.c8
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/fib_semantics.c20
-rw-r--r--net/ipv4/igmp.c7
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ip_sockglue.c8
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c37
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c8
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c15
-rw-r--r--net/ipv4/route.c5
-rw-r--r--net/ipv4/sysctl_net_ipv4.c16
-rw-r--r--net/ipv4/tcp_fastopen.c3
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/tcp_metrics.c1
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/ila/ila_lwt.c1
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ip6_offload.c1
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ip6_tunnel.c40
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/mcast.c51
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c8
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c13
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/seg6.c2
-rw-r--r--net/ipv6/seg6_hmac.c2
-rw-r--r--net/ipv6/seg6_iptunnel.c5
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/iucv/af_iucv.c25
-rw-r--r--net/l2tp/l2tp_ip.c19
-rw-r--r--net/l2tp/l2tp_ip6.c24
-rw-r--r--net/mac80211/chan.c3
-rw-r--r--net/mac80211/iface.c21
-rw-r--r--net/mac80211/main.c13
-rw-r--r--net/mac80211/rx.c38
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/tx.c20
-rw-r--r--net/mac80211/vht.c4
-rw-r--r--net/mpls/af_mpls.c48
-rw-r--r--net/mpls/mpls_iptunnel.c1
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/nf_conntrack_core.c44
-rw-r--r--net/netfilter/nf_log.c1
-rw-r--r--net/netfilter/nf_tables_api.c69
-rw-r--r--net/netfilter/nft_dynset.c3
-rw-r--r--net/netfilter/nft_log.c3
-rw-r--r--net/netfilter/nft_lookup.c3
-rw-r--r--net/netfilter/nft_objref.c6
-rw-r--r--net/netfilter/nft_payload.c27
-rw-r--r--net/netfilter/nft_queue.c2
-rw-r--r--net/netfilter/nft_quota.c26
-rw-r--r--net/netfilter/nft_set_hash.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/netlabel/netlabel_kapi.c5
-rw-r--r--net/openvswitch/conntrack.c6
-rw-r--r--net/openvswitch/datapath.c1
-rw-r--r--net/openvswitch/flow.c54
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/qrtr/qrtr.c4
-rw-r--r--net/sched/act_api.c5
-rw-r--r--net/sched/act_bpf.c5
-rw-r--r--net/sched/cls_api.c4
-rw-r--r--net/sched/cls_bpf.c4
-rw-r--r--net/sched/cls_flower.c4
-rw-r--r--net/sctp/ipv6.c3
-rw-r--r--net/sctp/offload.c2
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/sunrpc_syms.c1
-rw-r--r--net/sunrpc/svc_xprt.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c2
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.c16
-rw-r--r--net/tipc/msg.h2
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/node.c9
-rw-r--r--net/tipc/server.c48
-rw-r--r--net/tipc/socket.c24
-rw-r--r--net/tipc/subscr.c124
-rw-r--r--net/tipc/subscr.h1
-rw-r--r--net/unix/af_unix.c27
-rw-r--r--net/wireless/nl80211.c31
-rw-r--r--samples/Kconfig7
-rw-r--r--samples/Makefile3
-rw-r--r--samples/bpf/sock_example.h2
-rw-r--r--samples/bpf/tc_l2_redirect_kern.c1
-rw-r--r--samples/bpf/trace_output_user.c1
-rw-r--r--samples/bpf/xdp_tx_iptunnel_kern.c1
-rw-r--r--samples/vfio-mdev/Makefile14
-rw-r--r--samples/vfio-mdev/mtty.c55
-rw-r--r--scripts/gcc-plugins/gcc-common.h85
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c4
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c2
-rw-r--r--sound/firewire/tascam/tascam-stream.c2
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/soc/codecs/nau8825.c9
-rw-r--r--sound/soc/codecs/nau8825.h7
-rw-r--r--sound/soc/codecs/rt5645.c3
-rw-r--r--sound/soc/codecs/tlv320aic3x.c13
-rw-r--r--sound/soc/codecs/wm_adsp.c25
-rw-r--r--sound/soc/dwc/designware_i2s.c25
-rw-r--r--sound/soc/fsl/fsl_ssi.c74
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c18
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c3
-rw-r--r--sound/soc/intel/skylake/skl-sst.c3
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/soc/soc-core.c10
-rw-r--r--sound/soc/soc-pcm.c4
-rw-r--r--sound/soc/soc-topology.c3
-rw-r--r--sound/usb/endpoint.c20
-rw-r--r--sound/usb/endpoint.h2
-rw-r--r--sound/usb/pcm.c10
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/lib/subcmd/parse-options.c3
-rw-r--r--tools/lib/subcmd/parse-options.h5
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c4
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Makefile.perf4
-rw-r--r--tools/perf/builtin-kmem.c1
-rw-r--r--tools/perf/builtin-record.c4
-rw-r--r--tools/perf/builtin-sched.c17
-rw-r--r--tools/perf/util/probe-event.c166
-rw-r--r--tools/perf/util/probe-finder.c15
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/perf/util/symbol-elf.c6
-rw-r--r--tools/testing/selftests/Makefile2
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh2
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c53
-rwxr-xr-xtools/testing/selftests/net/run_netsocktests2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c2
-rw-r--r--tools/testing/selftests/x86/protection_keys.c2
-rw-r--r--tools/virtio/ringtest/main.h12
-rwxr-xr-xtools/virtio/ringtest/run-on-all.sh5
-rw-r--r--usr/Makefile16
-rw-r--r--virt/kvm/arm/arch_timer.c26
-rw-r--r--virt/kvm/arm/hyp/timer-sr.c33
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c18
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c2
-rw-r--r--virt/lib/irqbypass.c4
1223 files changed, 17958 insertions, 9260 deletions
diff --git a/.mailmap b/.mailmap
index 02d261407683..67dc22ffc9a8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -137,6 +137,7 @@ Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
Sam Ravnborg <sam@mars.ravnborg.org>
Santosh Shilimkar <ssantosh@kernel.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
@@ -150,10 +151,13 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
+Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Subhash Jadavani <subhashj@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Sumit Semwal <sumit.semwal@ti.com>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
+Thomas Pedersen <twp@codeaurora.org>
Tony Luck <tony.luck@intel.com>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
deleted file mode 100644
index 58553d7a321f..000000000000
--- a/Documentation/ABI/testing/sysfs-devices-deferred_probe
+++ /dev/null
@@ -1,12 +0,0 @@
-What: /sys/devices/.../deferred_probe
-Date: August 2016
-Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
-Description:
- The /sys/devices/.../deferred_probe attribute is
- present for all devices. If a driver detects during
- probing a device that a related device is not yet
- ready, it may defer probing of the first device. The
- kernel will retry probing the first device after any
- other device is successfully probed. This attribute
- reads as 1 if probing of this device is currently
- deferred, or 0 otherwise.
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index c75e5d6b8fa8..a6eb7dcd4dd5 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- 80211.xml sh.xml regulator.xml w1.xml \
+ sh.xml regulator.xml w1.xml \
writing_musb_glue_layer.xml iio.xml
ifeq ($(DOCBOOKS),)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 21e2d8863705..be7c0d9506b1 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -106,6 +106,16 @@
use by PCI
Format: <irq>,<irq>...
+ acpi_mask_gpe= [HW,ACPI]
+ Due to the existence of _Lxx/_Exx, some GPEs triggered
+ by unsupported hardware/firmware features can result in
+ GPE floodings that cannot be automatically disabled by
+ the GPE dispatcher.
+ This facility can be used to prevent such uncontrolled
+ GPE floodings.
+ Format: <int>
+ Support masking of GPEs numbered from 0x00 to 0x7f.
+
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
AML control methods that contain the opcodes to create
@@ -3811,10 +3821,11 @@
it if 0 is given (See Documentation/cgroup-v1/memory.txt)
swiotlb= [ARM,IA-64,PPC,MIPS,X86]
- Format: { <int> | force }
+ Format: { <int> | force | noforce }
<int> -- Number of I/O TLB slabs
force -- force using of bounce buffers even if they
wouldn't be automatically used by the kernel
+ noforce -- Never use bounce buffers (for debugging)
switches= [HW,M68k]
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 51642159aedb..c0a3bb5a6e4e 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.
io_poll (RW)
------------
-When read, this file shows the total number of block IO polls and how
-many returned success. Writing '0' to this file will disable polling
-for this device. Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0). Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.
io_poll_delay (RW)
------------------
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index 5fa691e6f638..cee9d5055fa2 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -62,6 +62,9 @@ wants to support one of the below features, it should adapt the bindings below.
"irq" and "wakeup" names are recognized by I2C core, other names are
left to individual drivers.
+- host-notify
+ device uses SMBus host notify protocol instead of interrupt line.
+
- multi-master
states that there is another master active on this bus. The OS can use
this information to adapt power management to keep the arbitration awake
@@ -81,6 +84,11 @@ Binding may contain optional "interrupts" property, describing interrupts
used by the device. I2C core will assign "irq" interrupt (or the very first
interrupt if not using interrupt names) as primary interrupt for the slave.
+Alternatively, devices supporting SMbus Host Notify, and connected to
+adapters that support this feature, may use "host-notify" property. I2C
+core will create a virtual interrupt for Host Notify and assign it as
+primary interrupt for the slave.
+
Also, if device is marked as a wakeup source, I2C core will set up "wakeup"
interrupt for the device. If "wakeup" interrupt name is not present in the
binding, then primary interrupt will be used as wakeup interrupt.
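
For illustration only, a minimal sketch of how a client driver could consume
that virtual interrupt; the "foo" driver, handler and names below are
hypothetical and not part of this binding. Once the I2C core has assigned the
Host Notify virq as the primary interrupt, it simply appears in client->irq:

    #include <linux/i2c.h>
    #include <linux/interrupt.h>

    static irqreturn_t foo_alert_handler(int irq, void *data)
    {
            struct i2c_client *client = data;

            dev_info(&client->dev, "Host Notify alert\n");
            return IRQ_HANDLED;
    }

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
            /* client->irq carries the Host Notify virtual interrupt when
             * the "host-notify" interrupt name was used in the binding
             */
            if (client->irq <= 0)
                    return -ENODEV;

            return devm_request_threaded_irq(&client->dev, client->irq, NULL,
                                             foo_alert_handler, IRQF_ONESHOT,
                                             "foo-alert", client);
    }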
diff --git a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
index 3e5b9793341f..8682ab6d4a50 100644
--- a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
@@ -8,8 +8,9 @@ This driver provides a simple power button event via an Interrupt.
Required properties:
- compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
-Required properties for TPS65218:
+Required properties:
- interrupts: should be one of the following
+ - <2>: For controllers compatible with tps65217
- <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
Examples:
@@ -17,6 +18,7 @@ Examples:
&tps {
tps65217-pwrbutton {
compatible = "ti,tps65217-pwrbutton";
+ interrupts = <2>;
};
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 0dcb7c7d3e40..944657684d73 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -15,6 +15,9 @@ Properties:
Second cell specifies the irq distribution mode to cores
0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+ The second cell in interrupts property is deprecated and may be ignored by
+ the kernel.
+
intc accessed via the special ARC AUX register interface, hence "reg" property
is not specified.
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt
index ad5a02f2ac8c..cd1bf2ac9055 100644
--- a/Documentation/devicetree/bindings/mtd/tango-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt
@@ -5,7 +5,7 @@ Required properties:
- compatible: "sigma,smp8758-nand"
- reg: address/size of nfc_reg, nfc_mem, and pbus_reg
- dmas: reference to the DMA channel used by the controller
-- dma-names: "nfc_sbox"
+- dma-names: "rxtx"
- clocks: reference to the system clock
- #address-cells: <1>
- #size-cells: <0>
@@ -17,9 +17,9 @@ Example:
nandc: nand-controller@2c000 {
compatible = "sigma,smp8758-nand";
- reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+ reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>;
dmas = <&dma0 3>;
- dma-names = "nfc_sbox";
+ dma-names = "rxtx";
clocks = <&clkgen SYS_CLK>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c010fafc66a8..c7194e87d5f4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
* Ethernet controller node
Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
order. These are fe_int0, fe_int1 and fe_int2.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index ff1bc4b1bb3b..fb5056b22685 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -19,8 +19,9 @@ Optional Properties:
specifications. If neither of these are specified, the default is to
assume clause 22.
- If the phy's identifier is known then the list may contain an entry
- of the form: "ethernet-phy-idAAAA.BBBB" where
+ If the PHY reports an incorrect ID (or none at all) then the
+ "compatible" list may contain an entry with the correct PHY ID in the
+ form: "ethernet-phy-idAAAA.BBBB" where
AAAA - The value of the 16 bit Phy Identifier 1 register as
4 hex digits. This is the chip vendor OUI bits 3:18
BBBB - The value of the 16 bit Phy Identifier 2 register as
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 85bf945b898f..afe9630a5e7d 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -3,9 +3,11 @@
Required properties:
- reg - The ID number for the phy, usually a small integer
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
- for applicable values
+ for applicable values. Required only if interface type is
+ PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
- for applicable values
+ for applicable values. Required only if interface type is
+ PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
- ti,fifo-depth - Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h
for applicable values
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
index 457b2c68d47b..8c5d27c5b562 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
@@ -19,7 +19,7 @@ iomuxc: iomuxc@30330000 {
reg = <0x30330000 0x10000>;
};
-Pheriparials using pads from iomuxc-lpsr support low state retention power
+Peripherals using pads from iomuxc-lpsr support low state retention power
state; under LPSR mode the GPIO state of the pads is retained.
Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-98dx3236-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-98dx3236-pinctrl.txt
new file mode 100644
index 000000000000..97aef67ee769
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-98dx3236-pinctrl.txt
@@ -0,0 +1,46 @@
+* Marvell 98dx3236 pinctrl driver for mpp
+
+Please refer to marvell,mvebu-pinctrl.txt in this directory for common binding
+part and usage
+
+Required properties:
+- compatible: "marvell,98dx3236-pinctrl" or "marvell,98dx4251-pinctrl"
+- reg: register specifier of MPP registers
+
+This driver supports all 98dx3236, 98dx3336 and 98dx4251 variants
+
+name pins functions
+================================================================================
+mpp0 0 gpo, spi0(mosi), dev(ad8)
+mpp1 1 gpio, spi0(miso), dev(ad9)
+mpp2 2 gpo, spi0(sck), dev(ad10)
+mpp3 3 gpio, spi0(cs0), dev(ad11)
+mpp4 4 gpio, spi0(cs1), smi(mdc), dev(cs0)
+mpp5 5 gpio, pex(rsto), sd0(cmd), dev(bootcs)
+mpp6 6 gpo, sd0(clk), dev(a2)
+mpp7 7 gpio, sd0(d0), dev(ale0)
+mpp8 8 gpio, sd0(d1), dev(ale1)
+mpp9 9 gpio, sd0(d2), dev(ready0)
+mpp10 10 gpio, sd0(d3), dev(ad12)
+mpp11 11 gpio, uart1(rxd), uart0(cts), dev(ad13)
+mpp12 12 gpo, uart1(txd), uart0(rts), dev(ad14)
+mpp13 13 gpio, intr(out), dev(ad15)
+mpp14 14 gpio, i2c0(sck)
+mpp15 15 gpio, i2c0(sda)
+mpp16 16 gpo, dev(oe)
+mpp17 17 gpo, dev(clkout)
+mpp18 18 gpio, uart1(txd)
+mpp19 19 gpio, uart1(rxd), dev(rb)
+mpp20 20 gpo, dev(we0)
+mpp21 21 gpo, dev(ad0)
+mpp22 22 gpo, dev(ad1)
+mpp23 23 gpo, dev(ad2)
+mpp24 24 gpo, dev(ad3)
+mpp25 25 gpo, dev(ad4)
+mpp26 26 gpo, dev(ad5)
+mpp27 27 gpo, dev(ad6)
+mpp28 28 gpo, dev(ad7)
+mpp29 29 gpo, dev(a0)
+mpp30 30 gpo, dev(a1)
+mpp31 31 gpio, slv_smi(mdc), smi(mdc), dev(we1)
+mpp32 32 gpio, slv_smi(mdio), smi(mdio), dev(cs1)
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
index 730444a9a4de..6c0ea155b708 100644
--- a/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
@@ -44,16 +44,16 @@ mpp16 16 gpio, sdio(d2), uart0(cts), uart1(rxd), mii(crs)
mpp17 17 gpio, sdio(d3)
mpp18 18 gpo, nand(io0)
mpp19 19 gpo, nand(io1)
-mpp20 20 gpio, mii(rxerr)
-mpp21 21 gpio, audio(spdifi)
-mpp22 22 gpio, audio(spdifo)
-mpp23 23 gpio, audio(rmclk)
-mpp24 24 gpio, audio(bclk)
-mpp25 25 gpio, audio(sdo)
-mpp26 26 gpio, audio(lrclk)
-mpp27 27 gpio, audio(mclk)
-mpp28 28 gpio, audio(sdi)
-mpp29 29 gpio, audio(extclk)
+mpp35 35 gpio, mii(rxerr)
+mpp36 36 gpio, audio(spdifi)
+mpp37 37 gpio, audio(spdifo)
+mpp38 38 gpio, audio(rmclk)
+mpp39 39 gpio, audio(bclk)
+mpp40 40 gpio, audio(sdo)
+mpp41 41 gpio, audio(lrclk)
+mpp42 42 gpio, audio(mclk)
+mpp43 43 gpio, audio(sdi)
+mpp44 44 gpio, audio(extclk)
* Marvell Kirkwood 88f6190
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 2ad18c4ea55c..b98e6f030da8 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -1,25 +1,38 @@
+======================
Aspeed Pin Controllers
-----------------------
+======================
The Aspeed SoCs vary in functionality inside a generation but have a common mux
device register layout.
-Required properties:
-- compatible : Should be any one of the following:
- "aspeed,ast2400-pinctrl"
- "aspeed,g4-pinctrl"
- "aspeed,ast2500-pinctrl"
- "aspeed,g5-pinctrl"
+Required properties for g4:
+- compatible : Should be one of the following:
+ "aspeed,ast2400-pinctrl"
+ "aspeed,g4-pinctrl"
-The pin controller node should be a child of a syscon node with the required
+Required properties for g5:
+- compatible : Should be one of the following:
+ "aspeed,ast2500-pinctrl"
+ "aspeed,g5-pinctrl"
+
+- aspeed,external-nodes: A cell of phandles to external controller nodes:
+ 0: compatible with "aspeed,ast2500-gfx", "syscon"
+ 1: compatible with "aspeed,ast2500-lhc", "syscon"
+
+The pin controller node should be the child of a syscon node with the required
property:
-- compatible: "syscon", "simple-mfd"
+
+- compatible : Should be one of the following:
+ "aspeed,ast2400-scu", "syscon", "simple-mfd"
+ "aspeed,g4-scu", "syscon", "simple-mfd"
+ "aspeed,ast2500-scu", "syscon", "simple-mfd"
+ "aspeed,g5-scu", "syscon", "simple-mfd"
Refer to the bindings described in
Documentation/devicetree/bindings/mfd/syscon.txt
Subnode Format
---------------
+==============
The required properties of child nodes are (as defined in pinctrl-bindings):
- function
@@ -31,26 +44,43 @@ supported:
aspeed,ast2400-pinctrl, aspeed,g4-pinctrl:
-ACPI BMCINT DDCCLK DDCDAT FLACK FLBUSY FLWP GPID0 GPIE0 GPIE2 GPIE4 GPIE6 I2C10
-I2C11 I2C12 I2C13 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8 I2C9 LPCPD LPCPME LPCSMI MDIO1
-MDIO2 NCTS1 NCTS3 NCTS4 NDCD1 NDCD3 NDCD4 NDSR1 NDSR3 NDTR1 NDTR3 NRI1 NRI3
-NRI4 NRTS1 NRTS3 PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7 RGMII1 RMII1 ROM16
-ROM8 ROMCS1 ROMCS2 ROMCS3 ROMCS4 RXD1 RXD3 RXD4 SD1 SGPMI SIOPBI SIOPBO TIMER3
-TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD3 TXD4 UART6 VGAHS VGAVS VPI18 VPI24 VPI30
-VPO12 VPO24
+ACPI ADC0 ADC1 ADC10 ADC11 ADC12 ADC13 ADC14 ADC15 ADC2 ADC3 ADC4 ADC5 ADC6
+ADC7 ADC8 ADC9 BMCINT DDCCLK DDCDAT EXTRST FLACK FLBUSY FLWP GPID GPID0 GPID2
+GPID4 GPID6 GPIE0 GPIE2 GPIE4 GPIE6 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4
+I2C5 I2C6 I2C7 I2C8 I2C9 LPCPD LPCPME LPCRST LPCSMI MAC1LINK MAC2LINK MDIO1
+MDIO2 NCTS1 NCTS2 NCTS3 NCTS4 NDCD1 NDCD2 NDCD3 NDCD4 NDSR1 NDSR2 NDSR3 NDSR4
+NDTR1 NDTR2 NDTR3 NDTR4 NDTS4 NRI1 NRI2 NRI3 NRI4 NRTS1 NRTS2 NRTS3 OSCCLK PWM0
+PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7 RGMII1 RGMII2 RMII1 RMII2 ROM16 ROM8 ROMCS1
+ROMCS2 ROMCS3 ROMCS4 RXD1 RXD2 RXD3 RXD4 SALT1 SALT2 SALT3 SALT4 SD1 SD2 SGPMCK
+SGPMI SGPMLD SGPMO SGPSCK SGPSI0 SGPSI1 SGPSLD SIOONCTRL SIOPBI SIOPBO SIOPWREQ
+SIOPWRGD SIOS3 SIOS5 SIOSCI SPI1 SPI1DEBUG SPI1PASSTHRU SPICS1 TIMER3 TIMER4
+TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD2 TXD3 TXD4 UART6 USBCKI VGABIOS_ROM VGAHS
+VGAVS VPI18 VPI24 VPI30 VPO12 VPO24 WDTRST1 WDTRST2
aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
-GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
-I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
-TIMER7 TIMER8 VGABIOSROM
-
-
-Examples:
+ACPI ADC0 ADC1 ADC10 ADC11 ADC12 ADC13 ADC14 ADC15 ADC2 ADC3 ADC4 ADC5 ADC6
+ADC7 ADC8 ADC9 BMCINT DDCCLK DDCDAT ESPI FWSPICS1 FWSPICS2 GPID0 GPID2 GPID4
+GPID6 GPIE0 GPIE2 GPIE4 GPIE6 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6
+I2C7 I2C8 I2C9 LAD0 LAD1 LAD2 LAD3 LCLK LFRAME LPCHC LPCPD LPCPLUS LPCPME
+LPCRST LPCSMI LSIRQ MAC1LINK MAC2LINK MDIO1 MDIO2 NCTS1 NCTS2 NCTS3 NCTS4 NDCD1
+NDCD2 NDCD3 NDCD4 NDSR1 NDSR2 NDSR3 NDSR4 NDTR1 NDTR2 NDTR3 NDTR4 NRI1 NRI2
+NRI3 NRI4 NRTS1 NRTS2 NRTS3 NRTS4 OSCCLK PEWAKE PNOR PWM0 PWM1 PWM2 PWM3 PWM4
+PWM5 PWM6 PWM7 RGMII1 RGMII2 RMII1 RMII2 RXD1 RXD2 RXD3 RXD4 SALT1 SALT10
+SALT11 SALT12 SALT13 SALT14 SALT2 SALT3 SALT4 SALT5 SALT6 SALT7 SALT8 SALT9
+SCL1 SCL2 SD1 SD2 SDA1 SDA2 SGPS1 SGPS2 SIOONCTRL SIOPBI SIOPBO SIOPWREQ
+SIOPWRGD SIOS3 SIOS5 SIOSCI SPI1 SPI1CS1 SPI1DEBUG SPI1PASSTHRU SPI2CK SPI2CS0
+SPI2CS1 SPI2MISO SPI2MOSI TIMER3 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD2
+TXD3 TXD4 UART6 USBCKI VGABIOSROM VGAHS VGAVS VPI24 VPO WDTRST1 WDTRST2
+
+Examples
+========
+
+g4 Example
+----------
syscon: scu@1e6e2000 {
- compatible = "syscon", "simple-mfd";
+ compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1a8>;
pinctrl: pinctrl {
@@ -63,5 +93,56 @@ syscon: scu@1e6e2000 {
};
};
+g5 Example
+----------
+
+ahb {
+ apb {
+ syscon: scu@1e6e2000 {
+ compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
+ reg = <0x1e6e2000 0x1a8>;
+
+ pinctrl: pinctrl {
+ compatible = "aspeed,g5-pinctrl";
+ aspeed,external-nodes = <&gfx &lhc>;
+
+ pinctrl_i2c3_default: i2c3_default {
+ function = "I2C3";
+ groups = "I2C3";
+ };
+ };
+ };
+
+ gfx: display@1e6e6000 {
+ compatible = "aspeed,ast2500-gfx", "syscon";
+ reg = <0x1e6e6000 0x1000>;
+ };
+ };
+
+ lpc: lpc@1e789000 {
+ compatible = "aspeed,ast2500-lpc", "simple-mfd";
+ reg = <0x1e789000 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e789000 0x1000>;
+
+ lpc_host: lpc-host@80 {
+ compatible = "aspeed,ast2500-lpc-host", "simple-mfd", "syscon";
+ reg = <0x80 0x1e0>;
+ reg-io-width = <4>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x80 0x1e0>;
+
+ lhc: lhc@20 {
+ compatible = "aspeed,ast2500-lhc";
+ reg = <0x20 0x24 0x48 0x8>;
+ };
+ };
+ };
+};
+
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices.
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 1baf19eecabf..5e00a21de2bf 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -13,6 +13,7 @@ Required Properties:
- "samsung,s3c2450-pinctrl": for S3C2450-compatible pin-controller,
- "samsung,s3c64xx-pinctrl": for S3C64xx-compatible pin-controller,
- "samsung,s5pv210-pinctrl": for S5PV210-compatible pin-controller,
+ - "samsung,exynos3250-pinctrl": for Exynos3250 compatible pin-controller.
- "samsung,exynos4210-pinctrl": for Exynos4210 compatible pin-controller.
- "samsung,exynos4x12-pinctrl": for Exynos4x12 compatible pin-controller.
- "samsung,exynos5250-pinctrl": for Exynos5250 compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/ti,iodelay.txt b/Documentation/devicetree/bindings/pinctrl/ti,iodelay.txt
new file mode 100644
index 000000000000..c3ed1232b6a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ti,iodelay.txt
@@ -0,0 +1,47 @@
+* Pin configuration for TI IODELAY controller
+
+TI dra7 based SoCs such as am57xx have a controller for setting the IO delay
+for each pin. For the most part, the IO delay values are programmed by the bootloader,
+but some pins need to be configured dynamically by the kernel such as the
+MMC pins.
+
+Required Properties:
+
+ - compatible: Must be "ti,dra7-iodelay"
+ - reg: Base address and length of the memory resource used
+ - #address-cells: Number of address cells
+ - #size-cells: Size of cells
+ - #pinctrl-cells: Number of pinctrl cells, must be 2. See also
+ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example
+-------
+
+In the SoC specific dtsi file:
+
+ dra7_iodelay_core: padconf@4844a000 {
+ compatible = "ti,dra7-iodelay";
+ reg = <0x4844a000 0x0d1c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #pinctrl-cells = <2>;
+ };
+
+In board-specific file:
+
+&dra7_iodelay_core {
+ mmc2_iodelay_3v3_conf: mmc2_iodelay_3v3_conf {
+ pinctrl-pin-array = <
+ 0x18c A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A19_IN */
+ 0x1a4 A_DELAY_PS(265) G_DELAY_PS(360) /* CFG_GPMC_A20_IN */
+ 0x1b0 A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A21_IN */
+ 0x1bc A_DELAY_PS(0) G_DELAY_PS(120) /* CFG_GPMC_A22_IN */
+ 0x1c8 A_DELAY_PS(287) G_DELAY_PS(420) /* CFG_GPMC_A23_IN */
+ 0x1d4 A_DELAY_PS(144) G_DELAY_PS(240) /* CFG_GPMC_A24_IN */
+ 0x1e0 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_A25_IN */
+ 0x1ec A_DELAY_PS(120) G_DELAY_PS(0) /* CFG_GPMC_A26_IN */
+ 0x1f8 A_DELAY_PS(120) G_DELAY_PS(180) /* CFG_GPMC_A27_IN */
+ 0x360 A_DELAY_PS(0) G_DELAY_PS(0) /* CFG_GPMC_CS1_IN */
+ >;
+ };
+};
diff --git a/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
index 98d131acee95..a11072c5a866 100644
--- a/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
@@ -2,11 +2,16 @@ TPS65217 Charger
Required Properties:
-compatible: "ti,tps65217-charger"
+-interrupts: TPS65217 interrupt numbers for the AC and USB charger input change.
+ Should be <0> for the USB charger and <1> for the AC adapter.
+-interrupt-names: Should be "USB" and "AC"
This node is a subnode of the tps65217 PMIC.
Example:
tps65217-charger {
- compatible = "ti,tps65090-charger";
+ compatible = "ti,tps65217-charger";
+ interrupts = <0>, <1>;
+ interrupt-names = "USB", "AC";
};
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index da6614c63796..dc975064fa27 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,17 +1,23 @@
Renesas MSIOF spi controller
Required properties:
-- compatible : "renesas,msiof-<soctype>" for SoCs,
- "renesas,sh-msiof" for SuperH, or
- "renesas,sh-mobile-msiof" for SH Mobile series.
- Examples with soctypes are:
- "renesas,msiof-r8a7790" (R-Car H2)
+- compatible : "renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
"renesas,msiof-r8a7794" (R-Car E2)
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
+ "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
+ "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+ "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+ "renesas,sh-msiof" (deprecated)
+
+ When compatible with the generic version, nodes
+ must list the SoC-specific version corresponding
+ to the platform first followed by the generic
+ version.
+
- reg : A list of offsets and lengths of the register sets for
the device.
If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
Example:
msiof0: spi@e6e20000 {
- compatible = "renesas,msiof-r8a7791";
+ compatible = "renesas,msiof-r8a7791",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 0bb0b5fc9512..6d9ff316b608 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -55,21 +55,6 @@ Device Drivers DMA Management
.. kernel-doc:: drivers/base/dma-mapping.c
:export:
-Device Drivers Power Management
--------------------------------
-
-.. kernel-doc:: drivers/base/power/main.c
- :export:
-
-Device Drivers ACPI Support
----------------------------
-
-.. kernel-doc:: drivers/acpi/scan.c
- :export:
-
-.. kernel-doc:: drivers/acpi/scan.c
- :internal:
-
Device drivers PnP support
--------------------------
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72624a16b792..c94b4675d021 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
It's slow but very precise.
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
..............................................................................
Field Content
Name filename of the executable
+ Umask file mode creation mask
State state (R is running, S is sleeping, D is sleeping
in an uninterruptible wait, Z is zombie,
T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
TracerPid PID of process tracing this process (0 if not)
Uid Real, effective, saved set, and file system UIDs
Gid Real, effective, saved set, and file system GIDs
- Umask file mode creation mask
FDSize number of file descriptor slots currently allocated
Groups supplementary group list
NStgid descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
VmPeak peak virtual memory size
VmSize total program size
VmLck locked memory size
+ VmPin pinned memory size
VmHWM peak resident set size ("high water mark")
VmRSS size of memory portions. It contains the three
following parts (VmRSS = RssAnon + RssFile + RssShmem)
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index 747c721776ed..ad8f0c0cd13f 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -146,10 +146,11 @@ a pull-up resistor is needed on the outgoing rail to complete the circuit, and
in the second case, a pull-down resistor is needed on the rail.
Hardware that supports open drain or open source or both, can implement a
-special callback in the gpio_chip: .set_single_ended() that takes an enum flag
-telling whether to configure the line as open drain, open source or push-pull.
-This will happen in response to the GPIO_OPEN_DRAIN or GPIO_OPEN_SOURCE flag
-set in the machine file, or coming from other hardware descriptions.
+special callback in the gpio_chip: .set_config() that takes a generic
+pinconf packed value telling whether to configure the line as open drain,
+open source or push-pull. This will happen in response to the
+GPIO_OPEN_DRAIN or GPIO_OPEN_SOURCE flag set in the machine file, or coming
+from other hardware descriptions.
If this state can not be configured in hardware, i.e. if the GPIO hardware does
not support open drain/open source in hardware, the GPIO library will instead
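
As a rough, non-authoritative sketch of the .set_config() callback described in
the hunk above (the foo_* driver, its foo_gpio_set_drive() helper and the FOO_*
line modes are hypothetical), the packed pinconf value is unpacked with the
generic helper and mapped onto the hardware:

    #include <linux/gpio/driver.h>
    #include <linux/pinctrl/pinconf-generic.h>

    static int foo_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
                                   unsigned long config)
    {
            struct foo_gpio *priv = gpiochip_get_data(gc);

            switch (pinconf_to_config_param(config)) {
            case PIN_CONFIG_DRIVE_OPEN_DRAIN:
                    return foo_gpio_set_drive(priv, offset, FOO_OPEN_DRAIN);
            case PIN_CONFIG_DRIVE_OPEN_SOURCE:
                    return foo_gpio_set_drive(priv, offset, FOO_OPEN_SOURCE);
            case PIN_CONFIG_DRIVE_PUSH_PULL:
                    return foo_gpio_set_drive(priv, offset, FOO_PUSH_PULL);
            default:
                    return -ENOTSUPP;   /* not supported by this controller */
            }
    }

The chip would then set .set_config = foo_gpio_set_config before registration.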
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 9ed15f86c17c..15d8d16934fd 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -5,8 +5,8 @@ platform_labels - INTEGER
possible to configure forwarding for label values equal to or
greater than the number of platform labels.
- A dense utliziation of the entries in the platform label table
- is possible and expected aas the platform labels are locally
+ A dense utilization of the entries in the platform label table
+ is possible and expected as the platform labels are locally
allocated.
If the number of platform label table entries is set to 0 no
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 0d3b9ce0a0b9..54bd5faa8782 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -79,9 +79,7 @@ int __init foo_probe(void)
{
struct pinctrl_dev *pctl;
- pctl = pinctrl_register(&foo_desc, <PARENT>, NULL);
- if (!pctl)
- pr_err("could not register foo pin driver\n");
+ return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
}
To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 8a39ce45d8a0..008ecb588317 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
The default suspend mode (ie. the one to be used without writing anything into
/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
"s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line. On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
The properties of all of the sleep states are described below.
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index a445da098bc6..3f76c0c37920 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
#else
const u16 *a = (const u16 *)addr1;
const u16 *b = (const u16 *)addr2;
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt
index b38afec35edc..d226c7a5ba8b 100644
--- a/Documentation/vfio-mediated-device.txt
+++ b/Documentation/vfio-mediated-device.txt
@@ -127,22 +127,22 @@ the VFIO when devices are unbound from the driver.
Physical Device Driver Interface
--------------------------------
-The physical device driver interface provides the parent_ops[3] structure to
-define the APIs to manage work in the mediated core driver that is related to
-the physical device.
+The physical device driver interface provides the mdev_parent_ops[3] structure
+to define the APIs to manage work in the mediated core driver that is related
+to the physical device.
-The structures in the parent_ops structure are as follows:
+The structures in the mdev_parent_ops structure are as follows:
* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
-The functions in the parent_ops structure are as follows:
+The functions in the mdev_parent_ops structure are as follows:
* create: allocate basic resources in a driver for a mediated device
* remove: free resources in a driver when a mediated device is destroyed
-The callbacks in the parent_ops structure are as follows:
+The callbacks in the mdev_parent_ops structure are as follows:
* open: open callback of mediated device
* close: close callback of mediated device
@@ -151,14 +151,14 @@ The callbacks in the parent_ops structure are as follows:
* write: write emulation callback
* mmap: mmap emulation callback
-A driver should use the parent_ops structure in the function call to register
-itself with the mdev core driver:
+A driver should use the mdev_parent_ops structure in the function call to
+register itself with the mdev core driver:
extern int mdev_register_device(struct device *dev,
- const struct parent_ops *ops);
+ const struct mdev_parent_ops *ops);
-However, the parent_ops structure is not required in the function call that a
-driver should use to unregister itself with the mdev core driver:
+However, the mdev_parent_ops structure is not required in the function call
+that a driver should use to unregister itself with the mdev core driver:
extern void mdev_unregister_device(struct device *dev);
@@ -223,6 +223,9 @@ Directories and files under the sysfs for Each Physical Device
sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name);
+ (or using mdev_parent_dev(mdev) to arrive at the parent device outside
+ of the core mdev code)
+
* device_api
This attribute should show which device API is being created, for example,
@@ -394,5 +397,5 @@ References
[1] See Documentation/vfio.txt for more information on VFIO.
[2] struct mdev_driver in include/linux/mdev.h
-[3] struct parent_ops in include/linux/mdev.h
+[3] struct mdev_parent_ops in include/linux/mdev.h
[4] struct vfio_iommu_driver_ops in include/linux/vfio.h
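
For orientation, a skeletal parent driver registration using the renamed
structure might look as follows; the foo_* names and the contents of the type
groups are hypothetical, and only a subset of the fields listed above is shown:

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/mdev.h>

    static struct attribute_group *foo_mdev_type_groups[] = {
            /* one attribute_group per supported mdev type goes here */
            NULL,
    };

    static int foo_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
    {
            /* allocate per-mdev state and hardware resources here */
            return 0;
    }

    static int foo_mdev_remove(struct mdev_device *mdev)
    {
            /* release whatever create() allocated */
            return 0;
    }

    static const struct mdev_parent_ops foo_mdev_ops = {
            .owner                 = THIS_MODULE,
            .supported_type_groups = foo_mdev_type_groups,
            .create                = foo_mdev_create,
            .remove                = foo_mdev_remove,
    };

    /* called from the physical device driver's probe() */
    static int foo_register_mdev(struct device *dev)
    {
            return mdev_register_device(dev, &foo_mdev_ops);
    }

    /* called on teardown; no ops argument is needed */
    static void foo_unregister_mdev(struct device *dev)
    {
            mdev_unregister_device(dev);
    }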
diff --git a/Documentation/vm/page_frags b/Documentation/vm/page_frags
new file mode 100644
index 000000000000..a6714565dbf9
--- /dev/null
+++ b/Documentation/vm/page_frags
@@ -0,0 +1,42 @@
+Page fragments
+--------------
+
+A page fragment is an arbitrary-length arbitrary-offset area of memory
+which resides within a 0 or higher order compound page. Multiple
+fragments within that page are individually refcounted, in the page's
+reference counter.
+
+The page_frag functions, page_frag_alloc and page_frag_free, provide a
+simple allocation framework for page fragments. This is used by the
+network stack and network device drivers to provide a backing region of
+memory for use as either an sk_buff->head, or to be used in the "frags"
+portion of skb_shared_info.
+
+In order to make use of the page fragment APIs a backing page fragment
+cache is needed. This provides a central point for the fragment allocation
+and tracking, and allows multiple calls to make use of a cached page. The
+advantage to doing this is that multiple calls to get_page can be avoided
+which can be expensive at allocation time. However due to the nature of
+this caching it is required that any calls to the cache be protected by
+either a per-cpu limitation, or a per-cpu limitation and forcing interrupts
+to be disabled when executing the fragment allocation.
+
+The network stack uses two separate caches per CPU to handle fragment
+allocation. The netdev_alloc_cache is used by callers making use of the
+__netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is
+used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The
+main difference between these two calls is the context in which they may be
+called. The "netdev" prefixed functions are usable in any context as these
+functions will disable interrupts, while the "napi" prefixed functions are
+only usable within the softirq context.
+
+Many network device drivers use a similar methodology for allocating page
+fragments, but the page fragments are cached at the ring or descriptor
+level. In order to enable these cases it is necessary to provide a generic
+way of tearing down a page cache. For this reason __page_frag_cache_drain
+was implemented. It allows for freeing multiple references from a single
+page via a single call. The advantage to doing this is that it allows for
+cleaning up the multiple references that were added to a page in order to
+avoid calling get_page per allocation.
+
+Alexander Duyck, Nov 29, 2016.
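
To make the above concrete, a hedged sketch follows (the foo_* driver and its
ring structure are hypothetical) of a per-ring fragment cache in the style of
the drivers mentioned in the last paragraph:

    #include <linux/gfp.h>
    #include <linux/mm_types.h>

    struct foo_rx_ring {
            struct page_frag_cache frag_cache;  /* must start out zeroed */
    };

    static void *foo_alloc_rx_buf(struct foo_rx_ring *ring, unsigned int len)
    {
            /* carves a fragment out of the cached compound page; every
             * fragment holds its own reference on that page
             */
            return page_frag_alloc(&ring->frag_cache, len, GFP_ATOMIC);
    }

    static void foo_free_rx_buf(void *buf)
    {
            /* drops the fragment's reference on the backing page */
            page_frag_free(buf);
    }

A driver that instead took a batch of extra references up front for its
descriptors can return them in one call with __page_frag_cache_drain(page,
count), as described above.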
diff --git a/MAINTAINERS b/MAINTAINERS
index cfff2c9e3d94..5f10c28b2e15 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -81,7 +81,6 @@ Descriptions of section entries:
Q: Patchwork web based patch tracking system site
T: SCM tree type and location.
Type is one of: git, hg, quilt, stgit, topgit
- B: Bug tracking system location.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -977,6 +976,7 @@ M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.armlinux.org.uk/
S: Maintained
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git
F: arch/arm/
ARM SUB-ARCHITECTURES
@@ -1154,6 +1154,7 @@ ARM/CLKDEV SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
F: arch/arm/include/asm/clkdev.h
F: drivers/clk/clkdev.c
@@ -1689,6 +1690,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
R: Javier Martinez Canillas <javier@osg.samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
S: Maintained
F: arch/arm/boot/dts/s3c*
F: arch/arm/boot/dts/s5p*
@@ -2194,14 +2196,6 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/atmel
-ATMEL DMA DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
-F: drivers/dma/at_hdmac.c
-F: drivers/dma/at_hdmac_regs.h
-F: include/linux/platform_data/dma-atmel.h
-
ATMEL XDMA DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
L: linux-arm-kernel@lists.infradead.org
@@ -3573,7 +3567,7 @@ F: drivers/infiniband/hw/cxgb3/
F: include/uapi/rdma/cxgb3-abi.h
CXGB4 ETHERNET DRIVER (CXGB4)
-M: Hariprasad S <hariprasad@chelsio.com>
+M: Ganesh Goudar <ganeshgr@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -3800,6 +3794,7 @@ F: include/linux/devcoredump.h
DEVICE FREQUENCY (DEVFREQ)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
+R: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
S: Maintained
@@ -4105,18 +4100,24 @@ F: drivers/gpu/drm/bridge/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
-S: Odd Fixes
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Maintained
F: drivers/gpu/drm/bochs/
DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Obsolete
+W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
F: drivers/gpu/drm/cirrus/
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
-L: dri-devel@lists.freedesktop.org
+L: amd-gfx@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/radeon/
@@ -4152,7 +4153,7 @@ F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
-L: igvt-g-dev@lists.01.org
+L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
@@ -4303,7 +4304,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt
DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Maintained
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h
@@ -5080,9 +5084,11 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-fbdev@vger.kernel.org
+T: git git://github.com/bzolnier/linux.git
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
-S: Orphan
+S: Maintained
F: Documentation/fb/
F: drivers/video/
F: include/video/
@@ -5504,6 +5510,7 @@ M: Alex Elder <elder@kernel.org>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
F: drivers/staging/greybus/
+L: greybus-dev@lists.linaro.org
GREYBUS AUDIO PROTOCOLS DRIVERS
M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
@@ -5961,6 +5968,7 @@ F: drivers/media/platform/sti/hva
Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
+M: Stephen Hemminger <sthemmin@microsoft.com>
L: devel@linuxdriverproject.org
S: Maintained
F: arch/x86/include/asm/mshyperv.h
@@ -7701,8 +7709,10 @@ F: drivers/net/dsa/mv88e6xxx/
F: Documentation/devicetree/bindings/net/dsa/marvell.txt
MARVELL ARMADA DRM SUPPORT
-M: Russell King <rmk+kernel@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
S: Maintained
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
F: Documentation/devicetree/bindings/display/armada/
@@ -8174,6 +8184,15 @@ S: Maintained
F: drivers/tty/serial/atmel_serial.c
F: include/linux/atmel_serial.h
+MICROCHIP / ATMEL DMA DRIVER
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/at_hdmac.c
+F: drivers/dma/at_hdmac_regs.h
+F: include/linux/platform_data/dma-atmel.h
+
MICROCHIP / ATMEL ISC DRIVER
M: Songjun Wu <songjun.wu@microchip.com>
L: linux-media@vger.kernel.org
@@ -8852,17 +8871,22 @@ F: drivers/video/fbdev/nvidia/
NVM EXPRESS DRIVER
M: Keith Busch <keith.busch@intel.com>
M: Jens Axboe <axboe@fb.com>
+M: Christoph Hellwig <hch@lst.de>
+M: Sagi Grimberg <sagi@grimberg.me>
L: linux-nvme@lists.infradead.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W: https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T: git://git.infradead.org/nvme.git
+W: http://git.infradead.org/nvme.git
S: Supported
F: drivers/nvme/host/
F: include/linux/nvme.h
+F: include/uapi/linux/nvme_ioctl.h
NVM EXPRESS TARGET DRIVER
M: Christoph Hellwig <hch@lst.de>
M: Sagi Grimberg <sagi@grimberg.me>
L: linux-nvme@lists.infradead.org
+T: git://git.infradead.org/nvme.git
+W: http://git.infradead.org/nvme.git
S: Supported
F: drivers/nvme/target/
@@ -8893,8 +8917,10 @@ S: Supported
F: drivers/nfc/nxp-nci
NXP TDA998X DRM DRIVER
-M: Russell King <rmk+kernel@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
S: Supported
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
F: drivers/gpu/drm/i2c/tda998x_drv.c
F: include/drm/i2c/tda998x.h
@@ -9842,7 +9868,7 @@ M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
-F: drivers/firmware/psci.c
+F: drivers/firmware/psci*.c
F: include/linux/psci.h
F: include/uapi/linux/psci.h
@@ -13075,6 +13101,7 @@ M: David Airlie <airlied@linux.ie>
M: Gerd Hoffmann <kraxel@redhat.com>
L: dri-devel@lists.freedesktop.org
L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
S: Maintained
F: drivers/gpu/drm/virtio/
F: include/uapi/linux/virtio_gpu.h
@@ -13426,6 +13453,7 @@ F: arch/x86/
X86 PLATFORM DRIVERS
M: Darren Hart <dvhart@infradead.org>
+M: Andy Shevchenko <andy@infradead.org>
L: platform-driver-x86@vger.kernel.org
T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
S: Maintained
@@ -13527,11 +13555,11 @@ F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
XFS FILESYSTEM
-M: Dave Chinner <david@fromorbit.com>
+M: Darrick J. Wong <darrick.wong@oracle.com>
M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org
W: http://xfs.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
+T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
S: Supported
F: Documentation/filesystems/xfs.txt
F: fs/xfs/
@@ -13597,6 +13625,7 @@ F: drivers/net/hamradio/z8530.h
ZBUD COMPRESSED PAGE ALLOCATOR
M: Seth Jennings <sjenning@redhat.com>
+M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
F: mm/zbud.c
@@ -13652,6 +13681,7 @@ F: Documentation/vm/zsmalloc.txt
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@redhat.com>
+M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
F: mm/zswap.c
diff --git a/Makefile b/Makefile
index ec411ba9e40f..96b27a888285 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Roaring Lionus
+EXTRAVERSION = -rc6
+NAME = Fearless Coyote
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d29077e4a..283099c9560a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MEMBLOCK
- select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+ select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..5008021fba98 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_IC_PTAG_HI 0x1F
/* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE 0x1
+#define IC_CTRL_DIS 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_DC_PTAG_HI 0x5F
/* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH 0x40
-#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_DIS 0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS 0x100
/*System-level cache (L2 cache) related Auxiliary registers */
#define ARC_REG_SLC_CFG 0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_RGN_END 0x916
/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS 0x001
#define SLC_CTRL_IM 0x040
-#define SLC_CTRL_DISABLE 0x001
#define SLC_CTRL_BUSY 0x100
#define SLC_CTRL_RGN_OP_INV 0x200
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index a36e8601114d..d5da2115d78a 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
" lp 1f \n"
" nop \n"
"1: \n"
- : : "r"(loops));
+ :
+ : "r"(loops)
+ : "lp_count");
}
extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
;
; Now manually save: r12, sp, fp, gp, r25
+ PUSH r30
PUSH r12
; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
POPAX AUX_USER_SP
1:
POP r12
+ POP r30
.endm
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b339c3..567590ea8f6c 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
#include <asm-generic/module.h>
-#ifdef CONFIG_ARC_DW2_UNWIND
struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
void *unw_info;
int unw_sec_idx;
+#endif
const char *secstr;
};
-#endif
#define MODULE_PROC_FAMILY "ARC700"
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
- unsigned long r12;
+ unsigned long r12, r30;
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cdab070..c568a9df82b1 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
void setup_processor(void);
void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
/* Helpers used in arc_*_mumbojumbo routines */
#define IS_AVAIL1(v, s) ((v) ? s : "")
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
GET_CPU_ID r5
cmp r5, 0
mov.nz r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
- ; Non-Master can proceed as system would be booted sufficiently
- jnz first_lines_of_secondary
-#else
+ bz .Lmaster_proceed
+
; Non-Masters wait for Master to boot enough and bring them up
- jnz arc_platform_smp_wait_to_boot
-#endif
- ; Master falls thru
+ ; when they resume, tail-call to entry point
+ mov blink, @first_lines_of_secondary
+ j arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
#endif
; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..ecef0fb0b66c 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
static void arcv2_irq_mask(struct irq_data *data)
{
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
static void arcv2_irq_unmask(struct irq_data *data)
{
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..8c1fd5c00782 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
- ienb &= ~(1 << data->irq);
+ ienb &= ~(1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
- ienb |= (1 << data->irq);
+ ienb |= (1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..9f6b68fd4f3b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
@@ -92,11 +93,10 @@ static void mcip_probe_n_setup(void)
READ_BCR(ARC_REG_MCIP_BCR, mp);
sprintf(smp_cpuinfo_buf,
- "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+ "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
- IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
@@ -174,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
-#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)
@@ -204,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
return IRQ_SET_MASK_OK;
}
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+ /*
+ * By default send all common interrupts to all available online CPUs.
+ * The affinity of common interrupts in IDU must be set manually since
+ * in some cases the kernel will not call irq_set_affinity() by itself:
+ * 1. When the kernel is not configured with support of SMP.
+ * 2. When the kernel is configured with support of SMP but upper
+ * interrupt controller does not support setting the affinity
+ * and cannot propagate it to IDU.
+ */
+ idu_irq_set_affinity(data, cpu_online_mask, false);
+ idu_irq_unmask(data);
+}
static struct irq_chip idu_irq_chip = {
.name = "MCIP IDU Intc",
.irq_mask = idu_irq_mask,
.irq_unmask = idu_irq_unmask,
+ .irq_enable = idu_irq_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = idu_irq_set_affinity,
#endif
@@ -221,10 +235,13 @@ static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *core_chip = irq_desc_get_chip(desc);
irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
+ chained_irq_enter(core_chip, desc);
generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+ chained_irq_exit(core_chip, desc);
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -239,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
- irq_hw_number_t hwirq = *out_hwirq = intspec[0];
- int distri = intspec[1];
- unsigned long flags;
-
+ /*
+ * Ignore value of interrupt distribution mode for common interrupts in
+ * IDU which resides in intspec[1] since setting an affinity using value
+ * from Device Tree is deprecated in ARC.
+ */
+ *out_hwirq = intspec[0];
*out_type = IRQ_TYPE_NONE;
- /* XXX: validate distribution scheme again online cpu mask */
- if (distri == 0) {
- /* 0 - Round Robin to all cpus, otherwise 1 bit per core */
- raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
- idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- } else {
- /*
- * DEST based distribution for Level Triggered intr can only
- * have 1 CPU, so generalize it to always contain 1 cpu
- */
- int cpu = ffs(distri);
-
- if (cpu != fls(distri))
- pr_warn("IDU irq %lx distri mode set to cpu %x\n",
- hwirq, cpu);
-
- raw_spin_lock_irqsave(&mcip_lock, flags);
- idu_set_dest(hwirq, cpu);
- idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- }
-
return 0;
}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
#ifdef CONFIG_ARC_DW2_UNWIND
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
- mod->arch.secstr = secstr;
#endif
+ mod->arch.secstr = secstr;
return 0;
}
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
+#ifdef CONFIG_ARC_DW2_UNWIND
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
+#endif
return 0;
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
static volatile int wake_flag;
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f) f
+#define __boot_write(f, v) f = v
+
+#else
+
+#define __boot_read(f) arc_read_uncached_32(&f)
+#define __boot_write(f, v) arc_write_uncached_32(&f, v)
+
+#endif
+
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
- wake_flag = cpu;
+
+ __boot_write(wake_flag, cpu);
}
void arc_platform_smp_wait_to_boot(int cpu)
{
- while (wake_flag != cpu)
+ /* for halt-on-reset, we've waited already */
+ if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+ return;
+
+ while (__boot_read(wake_flag) != cpu)
;
- wake_flag = 0;
- __asm__ __volatile__("j @first_lines_of_secondary \n");
+ __boot_write(wake_flag, 0);
}
-
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..91ebe382147f 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;
+ /* clear any remnants of the delay slot */
if (delay_mode(regs)) {
- regs->ret = regs->bta;
+ regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0e3321..d408fa21a07c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
static int l2_line_sz;
static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
/*
* For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ * - ARC700 programming model requires paddr and vaddr be passed in separate
+ * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ * caches actually alias or not.
+ * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
__after_dc_op(op);
}
+static inline void __dc_disable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ __dc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
#else
#define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
+noinline static void slc_entire_op(const int op)
+{
+ unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+ ctrl = read_aux_reg(r);
+
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(r, ctrl);
+
+ write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+ /* Important to wait for flush to complete */
+ while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ slc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
/***********************************************************
* Exported APIs
*/
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
return 0;
}
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ * Non-Masters need not be accessing caches at that time
+ * - They are either HALT_ON_RESET and kick started much later or
+ * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ * doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ * otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ * Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
{
- unsigned int __maybe_unused cpu = smp_processor_id();
- char str[256];
+ unsigned int ap_sz;
- printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+ /* Flush + invalidate SLC */
+ if (read_aux_reg(ARC_REG_SLC_BCR))
+ slc_entire_op(OP_FLUSH_N_INV);
+
+ /* IOC Aperture start: TBD: handle non-default CONFIG_LINUX_LINK_BASE */
+ write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/*
- * Only master CPU needs to execute rest of function:
- * - Assume SMP so all cores will have same cache config so
- * any geomtry checks will be same for all
- * - IOC setup / dma callbacks only need to be setup once
+ * IOC Aperture size:
+ * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+ * TBD: fix for PGU + 1GB of low mem
+ * TBD: fix for PAE
*/
- if (cpu)
- return;
+ ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+ write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+ write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+ write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+ /* Re-enable L1 dcache */
+ __dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
}
}
- if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
- /* IM set : flush before invalidate */
- write_aux_reg(ARC_REG_SLC_CTRL,
- read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+ /* Note that SLC disable not formally supported till HS 3.0 */
+ if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+ arc_slc_disable();
- write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
- /* Important to wait for flush to complete */
- while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
- write_aux_reg(ARC_REG_SLC_CTRL,
- read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
- }
+ if (is_isa_arcv2() && ioc_enable)
+ arc_ioc_setup();
if (is_isa_arcv2() && ioc_enable) {
- /* IO coherency base - 0x8z */
- write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
- /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
- write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
- /* Enable partial writes */
- write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
- /* Enable IO coherency */
- write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
__dma_cache_inv = __dma_cache_inv_ioc;
__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
__dma_cache_wback = __dma_cache_wback_l1;
}
}
+
+void __ref arc_cache_init(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+ /*
+ * Only master CPU needs to execute rest of function:
+ * - Assume SMP so all cores will have same cache config so
+ * any geometry checks will be same for all
+ * - IOC setup / dma callbacks only need to be setup once
+ */
+ if (!cpu)
+ arc_cache_init_master();
+}
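As a side note on the aperture-size encoding used by arc_ioc_setup() above: ARC_REG_IO_COH_AP0_SIZE takes a value SIZE such that the aperture spans 2^(SIZE+2) KB, so the patch derives it as order_base_2(mem_size_in_KB) - 2. The stand-alone sketch below checks that arithmetic for the 512 MB case; order_base_2_ul() here is only a local stand-in for the kernel's order_base_2() helper.

    #include <stdio.h>

    /* Local stand-in for the kernel's order_base_2(): log2 rounded up. */
    static unsigned int order_base_2_ul(unsigned long n)
    {
            unsigned int order = 0;

            while ((1UL << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long mem_sz = 512UL * 1024 * 1024;     /* 512 MB low mem */
            unsigned int ap_sz = order_base_2_ul(mem_sz / 1024) - 2;

            /* prints 17 (0x11), matching the value previously hard-coded */
            printf("AP0_SIZE = %u (%#x)\n", ap_sz, ap_sz);
            return 0;
    }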
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f223d25..8c9415ed6280 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
#endif
+long __init arc_get_mem_sz(void)
+{
+ return low_mem_sz;
+}
+
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5fab553fd03a..186c4c214e0a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1502,8 +1502,7 @@ source kernel/Kconfig.preempt
config HZ_FIXED
int
- default 200 if ARCH_EBSA110 || ARCH_S3C24XX || \
- ARCH_S5PV210 || ARCH_EXYNOS4
+ default 200 if ARCH_EBSA110
default 128 if SOC_AT91RM9200
default 0
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index cccdbcb557b6..f10fe8526239 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -501,6 +501,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
am3517-evm.dtb \
am3517_mt_ventoux.dtb \
logicpd-torpedo-37xx-devkit.dtb \
+ logicpd-som-lv-37xx-devkit.dtb \
omap3430-sdp.dtb \
omap3-beagle.dtb \
omap3-beagle-xm.dtb \
@@ -845,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a83t-allwinner-h8homlet-v2.dtb \
sun8i-a83t-cubietruck-plus.dtb \
sun8i-h3-bananapi-m2-plus.dtb \
+ sun8i-h3-nanopi-m1.dtb \
sun8i-h3-nanopi-neo.dtb \
sun8i-h3-orangepi-2.dtb \
sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index dc561d505bbe..3e32dd18fd25 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -6,8 +6,6 @@
* published by the Free Software Foundation.
*/
-#include <dt-bindings/mfd/tps65217.h>
-
/ {
cpus {
cpu@0 {
@@ -319,13 +317,13 @@
ti,pmic-shutdown-controller;
charger {
- interrupts = <TPS65217_IRQ_AC>, <TPS65217_IRQ_USB>;
- interrupts-names = "AC", "USB";
+ interrupts = <0>, <1>;
+ interrupt-names = "USB", "AC";
status = "okay";
};
pwrbutton {
- interrupts = <TPS65217_IRQ_PB>;
+ interrupts = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 1463df3b5b19..8ed46f9d79b7 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -170,7 +170,6 @@
AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
>;
};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 64c8aa9057a3..18d72a245e88 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -16,6 +16,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
aliases {
i2c0 = &i2c0;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index ac55f93fc91e..2df9e6050c2f 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -16,6 +16,7 @@
interrupt-parent = <&wakeupgen>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
memory@0 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index d6e43e5184c1..ad68d1eb3bc3 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -62,11 +62,6 @@
linux,default-trigger = "mmc0";
};
};
-
- extcon_usb2: extcon_usb2 {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
- };
};
&mmc1 {
@@ -79,3 +74,8 @@
&omap_dwc3_2 {
extcon = <&extcon_usb2>;
};
+
+&extcon_usb2 {
+ id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index 27d9149cedba..8350b4b34b08 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -23,11 +23,6 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
- extcon_usb2: extcon_usb2 {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
- };
-
status-leds {
compatible = "gpio-leds";
cpu0-led {
@@ -76,6 +71,11 @@
extcon = <&extcon_usb2>;
};
+&extcon_usb2 {
+ id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
+};
+
&mmc1 {
status = "okay";
vmmc-supply = <&v3_3d>;
@@ -87,3 +87,7 @@
&sn65hvs882 {
load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
};
+
+&pcie1 {
+ gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 555ae21f2b9a..814a720d5c3d 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -303,6 +303,13 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ extcon_usb2: tps659038_usb {
+ compatible = "ti,palmas-usb-vid";
+ ti,enable-vbus-detection;
+ ti,enable-id-detection;
+ /* ID & VBUS GPIOs provided in board dts */
+ };
};
};
@@ -369,7 +376,7 @@
};
&usb2 {
- dr_mode = "otg";
+ dr_mode = "peripheral";
};
&mmc2 {
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b6142bda661e..15f07f9af3b3 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
axi {
compatible = "simple-bus";
- ranges = <0x00000000 0x18000000 0x0011c40a>;
+ ranges = <0x00000000 0x18000000 0x0011c40c>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 41de15fe15a2..78492a0bbbab 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -99,6 +99,7 @@
#size-cells = <1>;
compatible = "m25p64";
spi-max-frequency = <30000000>;
+ m25p,fast-read;
reg = <0>;
partition@0 {
label = "U-Boot-SPL";
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 1facc5f12cef..81b8cecb5820 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -12,6 +12,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 61dd2f6b02bc..6db652ae9bd5 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -12,6 +12,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index addb7530cfbe..5ba161679e01 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -18,6 +18,7 @@
compatible = "ti,dra7xx";
interrupt-parent = <&crossbar_mpu>;
+ chosen { };
aliases {
i2c0 = &i2c1;
@@ -1377,6 +1378,7 @@
phy-names = "sata-phy";
clocks = <&sata_ref_clk>;
ti,hwmods = "sata";
+ ports-implemented = <0x1>;
};
rtc: rtc@48838000 {
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index c3d939c9666c..3f808a47df03 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -75,6 +75,6 @@
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
- ti,min-output-imepdance;
+ ti,min-output-impedance;
};
};
diff --git a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
index ee6dac44edf1..e6df676886c0 100644
--- a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
@@ -132,3 +132,19 @@
ti,palmas-long-press-seconds = <6>;
};
};
+
+&usb2_phy1 {
+ phy-supply = <&ldo4_reg>;
+};
+
+&usb2_phy2 {
+ phy-supply = <&ldo4_reg>;
+};
+
+&dss {
+ vdda_video-supply = <&ldo5_reg>;
+};
+
+&mmc1 {
+ vmmc_aux-supply = <&ldo1_reg>;
+};
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
index 685916e3d8a1..85cd8be22f71 100644
--- a/arch/arm/boot/dts/imx31.dtsi
+++ b/arch/arm/boot/dts/imx31.dtsi
@@ -31,11 +31,11 @@
};
};
- avic: avic-interrupt-controller@60000000 {
+ avic: interrupt-controller@68000000 {
compatible = "fsl,imx31-avic", "fsl,avic";
interrupt-controller;
#interrupt-cells = <1>;
- reg = <0x60000000 0x100000>;
+ reg = <0x68000000 0x100000>;
};
soc {
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 34887a10c5f1..47ba97229a48 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -319,8 +319,6 @@
compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_max-sgtl5000";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -402,6 +400,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index d80f21abea62..31d4cc62dbc7 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -250,8 +250,6 @@
compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_som2-sgtl5000";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -320,6 +318,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index e476d01959ea..26d060484728 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -533,7 +533,6 @@
MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17071
MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17071
MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17071
- MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x000b0
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 53e6e63cbb02..89b834f3fa17 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -1100,6 +1100,7 @@
interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
fsl,weim-cs-gpr = <&gpr>;
+ status = "disabled";
};
ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 4fd6de29f07d..19cbd879c448 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -900,6 +900,7 @@
reg = <0x021b8000 0x4000>;
interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
fsl,weim-cs-gpr = <&gpr>;
+ status = "disabled";
};
ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 076a30f9bcae..10f333016197 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -977,6 +977,7 @@
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_EIM_SLOW>;
fsl,weim-cs-gpr = <&gpr>;
+ status = "disabled";
};
ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index da8598402ab8..38faa90007d7 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -158,7 +158,7 @@
&mmc1 {
interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
pinctrl-names = "default";
- pinctrl-0 = <&mmc1_pins &mmc1_cd>;
+ pinctrl-0 = <&mmc1_pins>;
wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
vmmc-supply = <&vmmc1>;
@@ -193,7 +193,8 @@
OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
- OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
+ OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
+ OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
>;
};
@@ -242,12 +243,6 @@
OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
>;
};
-
- mmc1_cd: pinmux_mmc1_cd {
- pinctrl-single,pins = <
- OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
- >;
- };
};
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 4f793a025a72..f1d6de8b3c19 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -17,6 +17,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
aliases {
serial0 = &uart1;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 87ca50b53002..4d448f145ed1 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -734,6 +734,8 @@
vmmc_aux-supply = <&vsim>;
bus-width = <8>;
non-removable;
+ no-sdio;
+ no-sd;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index ecf5eb584c75..a3ff4933dbc1 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -17,6 +17,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 8087456b5fbe..578c53f08309 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -15,6 +15,7 @@
interrupt-parent = <&wakeupgen>;
#address-cells = <1>;
#size-cells = <1>;
+ chosen { };
aliases {
i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 968c67a49dbd..0844737b72b2 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -17,6 +17,7 @@
compatible = "ti,omap5";
interrupt-parent = <&wakeupgen>;
+ chosen { };
aliases {
i2c0 = &i2c1;
@@ -987,6 +988,7 @@
phy-names = "sata-phy";
clocks = <&sata_ref_clk>;
ti,hwmods = "sata";
+ ports-implemented = <0x1>;
};
dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 268bd470c865..407a4610f4a7 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/clock/qcom,gcc-msm8960.h>
#include <dt-bindings/reset/qcom,gcc-msm8960.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/soc/qcom,gsbi.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -303,6 +304,9 @@
firmware {
scm {
compatible = "qcom,scm-apq8064";
+
+ clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>;
+ clock-names = "core";
};
};
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 5ae4ec59e6ea..c852b69229c9 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -357,7 +357,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 735914f6ae44..7cae328398b1 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -140,6 +140,10 @@
cpu-supply = <&reg_dcdc3>;
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2b26175d55d1..e78faaf9243c 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -234,6 +234,7 @@
de: display-engine {
compatible = "allwinner,sun6i-a31-display-engine";
allwinner,pipelines = <&fe0>;
+ status = "disabled";
};
soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915f6d75..10d307408f23 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
};
&pio {
- mmc2_pins_nrst: mmc2@0 {
+ mmc2_pins_nrst: mmc2-rst-pin {
allwinner,pins = "PC16";
allwinner,function = "gpio_out";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 102838fcc588..15f4fd3f4695 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -81,7 +81,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0 0x2c001000 0 0x1000>,
- <0 0x2c002000 0 0x1000>,
+ <0 0x2c002000 0 0x2000>,
<0 0x2c004000 0 0x2000>,
<0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 45d08cc37b01..bd107c5a0226 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -131,7 +131,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0 0x2c001000 0 0x1000>,
- <0 0x2c002000 0 0x1000>,
+ <0 0x2c002000 0 0x2000>,
<0 0x2c004000 0 0x2000>,
<0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 7ea617e47fe4..958b4c42d320 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -153,7 +153,8 @@
switch0phy1: switch1phy0@1 {
reg = <1>;
interrupt-parent = <&switch0>;
- interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; };
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
switch0phy2: switch1phy0@2 {
reg = <2>;
interrupt-parent = <&switch0>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b01a43851294..028d2b70e3b5 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_DIGICOLOR_WATCHDOG=y
CONFIG_BCM2835_WDT=y
-CONFIG_BCM47XX_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
CONFIG_BCM7038_WDT=m
CONFIG_BCM_KONA_WDT=y
CONFIG_MFD_ACT8945A=y
@@ -893,7 +893,7 @@ CONFIG_BCM2835_MBOX=y
CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_EFI_VARS=m
CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_NVRAM=y
CONFIG_BCM47XX_SPROM=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 4364040ed696..1e6c48dd7b11 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 522b5feb4eaa..b62eaeb147aa 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -94,6 +94,9 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f5a644..22b73112b75f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
#define ftrace_return_address(n) return_address(n)
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ if (!strcmp(sym, "sys_mmap2"))
+ sym = "sys_mmap_pgoff";
+ else if (!strcmp(sym, "sys_statfs64_wrapper"))
+ sym = "sys_statfs64";
+ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+ sym = "sys_fstatfs64";
+ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+ sym = "sys_fadvise64_64";
+
+ /* Ignore case since sym may start with "SyS" instead of "sys" */
+ return !strcasecmp(sym, name);
+}
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */
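The helper added above lets the syscall tracer reconcile ARM's wrapper symbol names with the generic syscall metadata, ignoring case because the symbols may carry a "SyS" rather than "sys" prefix. A small stand-alone sketch exercising the same mapping (alias table copied from the hunk above; the test strings are made up):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>
    #include <stdbool.h>

    static bool match_sym_name(const char *sym, const char *name)
    {
            if (!strcmp(sym, "sys_mmap2"))
                    sym = "sys_mmap_pgoff";
            else if (!strcmp(sym, "sys_statfs64_wrapper"))
                    sym = "sys_statfs64";
            else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
                    sym = "sys_fstatfs64";
            else if (!strcmp(sym, "sys_arm_fadvise64_64"))
                    sym = "sys_fadvise64_64";

            /* case-insensitive, so "SyS_..." matches "sys_..." */
            return !strcasecmp(sym, name);
    }

    int main(void)
    {
            printf("%d\n", match_sym_name("sys_mmap2", "SyS_mmap_pgoff"));  /* 1 */
            printf("%d\n", match_sym_name("sys_arm_fadvise64_64",
                                          "sys_fadvise64_64"));            /* 1 */
            printf("%d\n", match_sym_name("sys_open", "sys_close"));       /* 0 */
            return 0;
    }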
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index a2e75b84e2ae..6dae1956c74d 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
+static inline bool has_vhe(void)
+{
+ return false;
+}
+
/* The section containing the hypervisor idmap text */
extern char __hyp_idmap_text_start[];
extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h
index a53cdb8f068c..9435a42f575e 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
#include <asm-generic/int-ll64.h>
@@ -37,4 +37,4 @@
#define __UINTPTR_TYPE__ unsigned long
#endif
-#endif /* _ASM_TYPES_H */
+#endif /* _UAPI_ASM_TYPES_H */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 188180b5523d..be3b3fbd382f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
+ /*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+ if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+ return 0;
+ }
+
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 22313cb53362..9af0701f7094 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
*/
#include <linux/preempt.h>
#include <linux/smp.h>
+#include <linux/uaccess.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+ uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+ uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_range(void *arg)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 11676787ad49..9d7446456e0c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
+ if (is_kernel_in_hyp_mode())
+ kvm_timer_init_vhe();
+
kvm_arm_init_debug();
}
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index df42c93a93d6..f5dce9b4e617 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -31,10 +31,10 @@ static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
-static void __clk_enable(struct clk *clk)
+void davinci_clk_enable(struct clk *clk)
{
if (clk->parent)
- __clk_enable(clk->parent);
+ davinci_clk_enable(clk->parent);
if (clk->usecount++ == 0) {
if (clk->flags & CLK_PSC)
davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
@@ -44,7 +44,7 @@ static void __clk_enable(struct clk *clk)
}
}
-static void __clk_disable(struct clk *clk)
+void davinci_clk_disable(struct clk *clk)
{
if (WARN_ON(clk->usecount == 0))
return;
@@ -56,7 +56,7 @@ static void __clk_disable(struct clk *clk)
clk->clk_disable(clk);
}
if (clk->parent)
- __clk_disable(clk->parent);
+ davinci_clk_disable(clk->parent);
}
int davinci_clk_reset(struct clk *clk, bool reset)
@@ -103,7 +103,7 @@ int clk_enable(struct clk *clk)
return -EINVAL;
spin_lock_irqsave(&clockfw_lock, flags);
- __clk_enable(clk);
+ davinci_clk_enable(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
return 0;
@@ -118,7 +118,7 @@ void clk_disable(struct clk *clk)
return;
spin_lock_irqsave(&clockfw_lock, flags);
- __clk_disable(clk);
+ davinci_clk_disable(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index e2a5437a1aee..fa2b83752e03 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -132,6 +132,8 @@ int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
int davinci_set_refclk_rate(unsigned long rate);
int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
int davinci_clk_reset(struct clk *clk, bool reset);
+void davinci_clk_enable(struct clk *clk);
+void davinci_clk_disable(struct clk *clk);
extern struct platform_device davinci_wdt_device;
extern void davinci_watchdog_reset(struct platform_device *);
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index e770c97ea45c..1d873d15b545 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -319,6 +319,16 @@ static struct clk emac_clk = {
.gpsc = 1,
};
+/*
+ * In order to avoid adding the emac_clk to the clock lookup table twice (and
+ * screwing up the linked list in the process) create a separate clock for
+ * mdio inheriting the rate from emac_clk.
+ */
+static struct clk mdio_clk = {
+ .name = "mdio",
+ .parent = &emac_clk,
+};
+
static struct clk mcasp_clk = {
.name = "mcasp",
.parent = &async3_clk,
@@ -367,6 +377,16 @@ static struct clk aemif_clk = {
.flags = ALWAYS_ENABLED,
};
+/*
+ * In order to avoid adding the aemif_clk to the clock lookup table twice (and
+ * screwing up the linked list in the process) create a separate clock for
+ * nand inheriting the rate from aemif_clk.
+ */
+static struct clk aemif_nand_clk = {
+ .name = "nand",
+ .parent = &aemif_clk,
+};
+
static struct clk usb11_clk = {
.name = "usb11",
.parent = &pll0_sysclk4,
@@ -529,7 +549,7 @@ static struct clk_lookup da850_clks[] = {
CLK(NULL, "arm", &arm_clk),
CLK(NULL, "rmii", &rmii_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
- CLK("davinci_mdio.0", "fck", &emac_clk),
+ CLK("davinci_mdio.0", "fck", &mdio_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk),
CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk),
@@ -537,7 +557,15 @@ static struct clk_lookup da850_clks[] = {
CLK("da830-mmc.0", NULL, &mmcsd0_clk),
CLK("da830-mmc.1", NULL, &mmcsd1_clk),
CLK("ti-aemif", NULL, &aemif_clk),
- CLK(NULL, "aemif", &aemif_clk),
+ /*
+ * The only user of this clock is davinci_nand and it gets it through
+ * con_id. The nand node itself is created from within the aemif
+ * driver to guarantee that it's probed after the aemif timing
+ * parameters are configured. of_dev_auxdata is not accessible from
+ * the aemif driver and can't be passed to of_platform_populate(). For
+ * that reason we're leaving the dev_id here as NULL.
+ */
+ CLK(NULL, "aemif", &aemif_nand_clk),
CLK("ohci-da8xx", "usb11", &usb11_clk),
CLK("musb-da8xx", "usb20", &usb20_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),
diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c
index c6feecf7ae24..9a6af0bd5dc3 100644
--- a/arch/arm/mach-davinci/usb-da8xx.c
+++ b/arch/arm/mach-davinci/usb-da8xx.c
@@ -22,6 +22,8 @@
#define DA8XX_USB0_BASE 0x01e00000
#define DA8XX_USB1_BASE 0x01e25000
+static struct clk *usb20_clk;
+
static struct platform_device da8xx_usb_phy = {
.name = "da8xx-usb-phy",
.id = -1,
@@ -158,26 +160,13 @@ int __init da8xx_register_usb_refclkin(int rate)
static void usb20_phy_clk_enable(struct clk *clk)
{
- struct clk *usb20_clk;
- int err;
u32 val;
u32 timeout = 500000; /* 500 msec */
val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
- usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
- if (IS_ERR(usb20_clk)) {
- pr_err("could not get usb20 clk: %ld\n", PTR_ERR(usb20_clk));
- return;
- }
-
/* The USB 2.0 PLL requires that the USB 2.0 PSC is enabled as well. */
- err = clk_prepare_enable(usb20_clk);
- if (err) {
- pr_err("failed to enable usb20 clk: %d\n", err);
- clk_put(usb20_clk);
- return;
- }
+ davinci_clk_enable(usb20_clk);
/*
* Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
@@ -197,8 +186,7 @@ static void usb20_phy_clk_enable(struct clk *clk)
pr_err("Timeout waiting for USB 2.0 PHY clock good\n");
done:
- clk_disable_unprepare(usb20_clk);
- clk_put(usb20_clk);
+ davinci_clk_disable(usb20_clk);
}
static void usb20_phy_clk_disable(struct clk *clk)
@@ -285,11 +273,19 @@ static struct clk_lookup usb20_phy_clk_lookup =
int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin)
{
struct clk *parent;
- int ret = 0;
+ int ret;
+
+ usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
+ ret = PTR_ERR_OR_ZERO(usb20_clk);
+ if (ret)
+ return ret;
parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux");
- if (IS_ERR(parent))
- return PTR_ERR(parent);
+ ret = PTR_ERR_OR_ZERO(parent);
+ if (ret) {
+ clk_put(usb20_clk);
+ return ret;
+ }
usb20_phy_clk.parent = parent;
ret = clk_register(&usb20_phy_clk);
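The rework above appears to be motivated by execution context: usb20_phy_clk_enable() runs as a clk_enable callback under the davinci clockfw spinlock, where clk_get() and clk_prepare_enable() (both of which may sleep) are not safe, so the clock is now looked up once at registration time and toggled with the non-sleeping davinci_clk_enable()/davinci_clk_disable() helpers exported in the clock.c hunk above. Condensed, the pattern is (sketch only, error handling trimmed):

    static struct clk *usb20_clk;                   /* resolved at init time */

    int __init register_phy_clk(void)
    {
            usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
            return PTR_ERR_OR_ZERO(usb20_clk);      /* may sleep: fine here */
    }

    static void phy_clk_enable_cb(struct clk *clk)
    {
            /* atomic context: spinlock held, IRQs off - no sleeping calls */
            davinci_clk_enable(usb20_clk);
            /* ... poll the PHY clock-good bit ... */
            davinci_clk_disable(usb20_clk);
    }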
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 98ffe1e62ad5..a5d68411a037 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -385,36 +385,6 @@ fail:
return pen_release != -1 ? ret : 0;
}
-/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
- */
-
-static void __init exynos_smp_init_cpus(void)
-{
- void __iomem *scu_base = scu_base_addr();
- unsigned int i, ncores;
-
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
- ncores = scu_base ? scu_get_core_count(scu_base) : 1;
- else
- /*
- * CPU Nodes are passed thru DT and set_cpu_possible
- * is set by "arm_dt_init_cpu_maps".
- */
- return;
-
- /* sanity check */
- if (ncores > nr_cpu_ids) {
- pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
- ncores, nr_cpu_ids);
- ncores = nr_cpu_ids;
- }
-
- for (i = 0; i < ncores; i++)
- set_cpu_possible(i, true);
-}
-
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
int i;
@@ -479,7 +449,6 @@ static void exynos_cpu_die(unsigned int cpu)
#endif /* CONFIG_HOTPLUG_CPU */
const struct smp_operations exynos_smp_ops __initconst = {
- .smp_init_cpus = exynos_smp_init_cpus,
.smp_prepare_cpus = exynos_smp_prepare_cpus,
.smp_secondary_init = exynos_secondary_init,
.smp_boot_secondary = exynos_boot_secondary,
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 06332f626565..10bc753624be 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -57,7 +57,6 @@ struct exynos_wkup_irq {
struct exynos_pm_data {
const struct exynos_wkup_irq *wkup_irq;
unsigned int wake_disable_mask;
- unsigned int *release_ret_regs;
void (*pm_prepare)(void);
void (*pm_resume_prepare)(void);
@@ -95,47 +94,6 @@ static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
{ /* sentinel */ },
};
-static unsigned int exynos_release_ret_regs[] = {
- S5P_PAD_RET_MAUDIO_OPTION,
- S5P_PAD_RET_GPIO_OPTION,
- S5P_PAD_RET_UART_OPTION,
- S5P_PAD_RET_MMCA_OPTION,
- S5P_PAD_RET_MMCB_OPTION,
- S5P_PAD_RET_EBIA_OPTION,
- S5P_PAD_RET_EBIB_OPTION,
- REG_TABLE_END,
-};
-
-static unsigned int exynos3250_release_ret_regs[] = {
- S5P_PAD_RET_MAUDIO_OPTION,
- S5P_PAD_RET_GPIO_OPTION,
- S5P_PAD_RET_UART_OPTION,
- S5P_PAD_RET_MMCA_OPTION,
- S5P_PAD_RET_MMCB_OPTION,
- S5P_PAD_RET_EBIA_OPTION,
- S5P_PAD_RET_EBIB_OPTION,
- S5P_PAD_RET_MMC2_OPTION,
- S5P_PAD_RET_SPI_OPTION,
- REG_TABLE_END,
-};
-
-static unsigned int exynos5420_release_ret_regs[] = {
- EXYNOS_PAD_RET_DRAM_OPTION,
- EXYNOS_PAD_RET_MAUDIO_OPTION,
- EXYNOS_PAD_RET_JTAG_OPTION,
- EXYNOS5420_PAD_RET_GPIO_OPTION,
- EXYNOS5420_PAD_RET_UART_OPTION,
- EXYNOS5420_PAD_RET_MMCA_OPTION,
- EXYNOS5420_PAD_RET_MMCB_OPTION,
- EXYNOS5420_PAD_RET_MMCC_OPTION,
- EXYNOS5420_PAD_RET_HSI_OPTION,
- EXYNOS_PAD_RET_EBIA_OPTION,
- EXYNOS_PAD_RET_EBIB_OPTION,
- EXYNOS5420_PAD_RET_SPI_OPTION,
- EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION,
- REG_TABLE_END,
-};
-
static int exynos_irq_set_wake(struct irq_data *data, unsigned int state)
{
const struct exynos_wkup_irq *wkup_irq;
@@ -442,15 +400,6 @@ static int exynos5420_pm_suspend(void)
return 0;
}
-static void exynos_pm_release_retention(void)
-{
- unsigned int i;
-
- for (i = 0; (pm_data->release_ret_regs[i] != REG_TABLE_END); i++)
- pmu_raw_writel(EXYNOS_WAKEUP_FROM_LOWPWR,
- pm_data->release_ret_regs[i]);
-}
-
static void exynos_pm_resume(void)
{
u32 cpuid = read_cpuid_part();
@@ -458,9 +407,6 @@ static void exynos_pm_resume(void)
if (exynos_pm_central_resume())
goto early_wakeup;
- /* For release retention */
- exynos_pm_release_retention();
-
if (cpuid == ARM_CPU_PART_CORTEX_A9)
scu_enable(S5P_VA_SCU);
@@ -482,9 +428,6 @@ static void exynos3250_pm_resume(void)
if (exynos_pm_central_resume())
goto early_wakeup;
- /* For release retention */
- exynos_pm_release_retention();
-
pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
if (call_firmware_op(resume) == -ENOSYS
@@ -522,9 +465,6 @@ static void exynos5420_pm_resume(void)
if (exynos_pm_central_resume())
goto early_wakeup;
- /* For release retention */
- exynos_pm_release_retention();
-
pmu_raw_writel(exynos_pmu_spare3, S5P_PMU_SPARE3);
early_wakeup:
@@ -637,7 +577,6 @@ static const struct platform_suspend_ops exynos_suspend_ops = {
static const struct exynos_pm_data exynos3250_pm_data = {
.wkup_irq = exynos3250_wkup_irq,
.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
- .release_ret_regs = exynos3250_release_ret_regs,
.pm_suspend = exynos_pm_suspend,
.pm_resume = exynos3250_pm_resume,
.pm_prepare = exynos3250_pm_prepare,
@@ -647,7 +586,6 @@ static const struct exynos_pm_data exynos3250_pm_data = {
static const struct exynos_pm_data exynos4_pm_data = {
.wkup_irq = exynos4_wkup_irq,
.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
- .release_ret_regs = exynos_release_ret_regs,
.pm_suspend = exynos_pm_suspend,
.pm_resume = exynos_pm_resume,
.pm_prepare = exynos_pm_prepare,
@@ -657,7 +595,6 @@ static const struct exynos_pm_data exynos4_pm_data = {
static const struct exynos_pm_data exynos5250_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
- .release_ret_regs = exynos_release_ret_regs,
.pm_suspend = exynos_pm_suspend,
.pm_resume = exynos_pm_resume,
.pm_prepare = exynos_pm_prepare,
@@ -667,7 +604,6 @@ static const struct exynos_pm_data exynos5250_pm_data = {
static const struct exynos_pm_data exynos5420_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
- .release_ret_regs = exynos5420_release_ret_regs,
.pm_resume_prepare = exynos5420_prepare_pm_resume,
.pm_resume = exynos5420_pm_resume,
.pm_suspend = exynos5420_pm_suspend,
diff --git a/arch/arm/mach-imx/mach-imx1.c b/arch/arm/mach-imx/mach-imx1.c
index de5ab8d88549..3a8406e45b65 100644
--- a/arch/arm/mach-imx/mach-imx1.c
+++ b/arch/arm/mach-imx/mach-imx1.c
@@ -37,7 +37,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
};
DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
- .map_io = debug_ll_io_init,
.init_early = imx1_init_early,
.init_irq = imx1_init_irq,
.dt_compat = imx1_dt_board_compat,
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589cd312..c821c1d5610e 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
#include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT 17
static u32 enable_1510_mode;
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
goto exit_iounmap;
}
- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
-
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
- if (cpu_is_omap15xx())
- d->chan_count = 9;
- else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
- if (!(d->dev_caps & ENABLE_1510_MODE))
- d->chan_count = 16;
+ /* available logical channels */
+ if (cpu_is_omap15xx()) {
+ d->lch_count = 9;
+ } else {
+ if (d->dev_caps & ENABLE_1510_MODE)
+ d->lch_count = 9;
else
- d->chan_count = 9;
+ d->lch_count = 16;
}
p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 469894082fea..093458b62c8d 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -7,7 +7,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
# Common support
obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
- common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
+ common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
omap_device.o omap-headsmp.o sram.o drm.o
hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 36d9943205ca..dc9e34e670a2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -304,7 +304,7 @@ DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
.init_late = am43xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
- .init_time = omap4_local_timer_init,
+ .init_time = omap3_gptimer_timer_init,
.dt_compat = am43_boards_compat,
.restart = omap44xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
deleted file mode 100644
index 7a577145b68b..000000000000
--- a/arch/arm/mach-omap2/gpio.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * OMAP2+ specific gpio initialization
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- *
- * Author:
- * Charulatha V <charu@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-#include "omap-pm.h"
-
-#include "powerdomain.h"
-
-static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
-{
- struct platform_device *pdev;
- struct omap_gpio_platform_data *pdata;
- struct omap_gpio_dev_attr *dev_attr;
- char *name = "omap_gpio";
- int id;
- struct powerdomain *pwrdm;
-
- /*
- * extract the device id from name field available in the
- * hwmod database and use the same for constructing ids for
- * gpio devices.
- * CAUTION: Make sure the name in the hwmod database does
- * not change. If changed, make corresponding change here
- * or make use of static variable mechanism to handle this.
- */
- sscanf(oh->name, "gpio%d", &id);
-
- pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL);
- if (!pdata) {
- pr_err("gpio%d: Memory allocation failed\n", id);
- return -ENOMEM;
- }
-
- dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
- pdata->bank_width = dev_attr->bank_width;
- pdata->dbck_flag = dev_attr->dbck_flag;
- pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
- pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
- if (!pdata->regs) {
- pr_err("gpio%d: Memory allocation failed\n", id);
- kfree(pdata);
- return -ENOMEM;
- }
-
- switch (oh->class->rev) {
- case 0:
- if (id == 1)
- /* non-wakeup GPIO pins for OMAP2 Bank1 */
- pdata->non_wakeup_gpios = 0xe203ffc0;
- else if (id == 2)
- /* non-wakeup GPIO pins for OMAP2 Bank2 */
- pdata->non_wakeup_gpios = 0x08700040;
- /* fall through */
-
- case 1:
- pdata->regs->revision = OMAP24XX_GPIO_REVISION;
- pdata->regs->direction = OMAP24XX_GPIO_OE;
- pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
- pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
- pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
- pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
- pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
- pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
- pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
- pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2;
- pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
- pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
- pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
- pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
- pdata->regs->ctrl = OMAP24XX_GPIO_CTRL;
- pdata->regs->wkup_en = OMAP24XX_GPIO_WAKE_EN;
- pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0;
- pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1;
- pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT;
- pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT;
- break;
- case 2:
- pdata->regs->revision = OMAP4_GPIO_REVISION;
- pdata->regs->direction = OMAP4_GPIO_OE;
- pdata->regs->datain = OMAP4_GPIO_DATAIN;
- pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
- pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
- pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
- pdata->regs->irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0;
- pdata->regs->irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1;
- pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
- pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
- pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
- pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1;
- pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
- pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
- pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
- pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
- pdata->regs->ctrl = OMAP4_GPIO_CTRL;
- pdata->regs->wkup_en = OMAP4_GPIO_IRQWAKEN0;
- pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0;
- pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1;
- pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT;
- pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT;
- break;
- default:
- WARN(1, "Invalid gpio bank_type\n");
- kfree(pdata->regs);
- kfree(pdata);
- return -EINVAL;
- }
-
- pwrdm = omap_hwmod_get_pwrdm(oh);
- pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
-
- pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
- kfree(pdata);
-
- if (IS_ERR(pdev)) {
- WARN(1, "Can't build omap_device for %s:%s.\n",
- name, oh->name);
- return PTR_ERR(pdev);
- }
-
- return 0;
-}
-
-/*
- * gpio_init needs to be done before
- * machine_init functions access gpio APIs.
- * Hence gpio_init is a omap_postcore_initcall.
- */
-static int __init omap2_gpio_init(void)
-{
- /* If dtb is there, the devices will be created dynamically */
- if (of_have_populated_dt())
- return -ENODEV;
-
- return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init, NULL);
-}
-omap_postcore_initcall(omap2_gpio_init);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 759e1d45ba25..e8b988714a09 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -741,14 +741,14 @@ static int _init_main_clk(struct omap_hwmod *oh)
int ret = 0;
char name[MOD_CLK_MAX_NAME_LEN];
struct clk *clk;
+ static const char modck[] = "_mod_ck";
- /* +7 magic comes from '_mod_ck' suffix */
- if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
+ if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
pr_warn("%s: warning: cropping name for %s\n", __func__,
oh->name);
- strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7);
- strcat(name, "_mod_ck");
+ strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
+ strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
clk = clk_get(NULL, name);
if (!IS_ERR(clk)) {
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index cdfbb44ceb0c..f22e9cb39f4a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -121,10 +121,6 @@ extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 477910a48448..70c004794880 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
.nshutdown_gpio = 162,
.dev_name = "/dev/ttyO1",
.flow_cntrl = 1,
- .baud_rate = 300000,
+ .baud_rate = 3000000,
};
static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 5b2f5138d938..2b138b65129a 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -295,10 +295,8 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
GFP_KERNEL);
if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
- !prcm_irq_setup->priority_mask) {
- pr_err("PRCM: kzalloc failed\n");
+ !prcm_irq_setup->priority_mask)
goto err;
- }
memset(mask, 0, sizeof(mask));
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 56128da23c3a..07dd692c4737 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -510,18 +510,19 @@ void __init omap3_secure_sync32k_timer_init(void)
}
#endif /* CONFIG_ARCH_OMAP3 */
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX)
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
+ defined(CONFIG_SOC_AM43XX)
void __init omap3_gptimer_timer_init(void)
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
1, "timer_sys_ck", "ti,timer-alwon", true);
-
- clocksource_probe();
+ if (of_have_populated_dt())
+ clocksource_probe();
}
#endif
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
- defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
+ defined(CONFIG_SOC_DRA7XX)
static void __init omap4_sync32k_timer_init(void)
{
__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index f6c3f151d0d4..b59f4f4f256f 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -345,10 +345,40 @@ static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
};
+static const struct dma_slave_map s3c2410_dma_slave_map[] = {
+ { "s3c2410-sdi", "rx-tx", (void *)DMACH_SDI },
+ { "s3c2410-spi.0", "rx", (void *)DMACH_SPI0_RX },
+ { "s3c2410-spi.0", "tx", (void *)DMACH_SPI0_TX },
+ { "s3c2410-spi.1", "rx", (void *)DMACH_SPI1_RX },
+ { "s3c2410-spi.1", "tx", (void *)DMACH_SPI1_TX },
+ /*
+ * The DMA request source[1] (DMACH_UARTx_SRC2) is
+ * not used in the UART driver.
+ */
+ { "s3c2410-uart.0", "rx", (void *)DMACH_UART0 },
+ { "s3c2410-uart.0", "tx", (void *)DMACH_UART0 },
+ { "s3c2410-uart.1", "rx", (void *)DMACH_UART1 },
+ { "s3c2410-uart.1", "tx", (void *)DMACH_UART1 },
+ { "s3c2410-uart.2", "rx", (void *)DMACH_UART2 },
+ { "s3c2410-uart.2", "tx", (void *)DMACH_UART2 },
+ { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+ { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+ { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+ { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+ { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+ { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+ { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+ { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+ { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+ { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
.num_phy_channels = 4,
.channels = s3c2410_dma_channels,
.num_channels = DMACH_MAX,
+ .slave_map = s3c2410_dma_slave_map,
+ .slavecnt = ARRAY_SIZE(s3c2410_dma_slave_map),
};
struct platform_device s3c2410_device_dma = {
@@ -388,10 +418,36 @@ static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
};
+static const struct dma_slave_map s3c2412_dma_slave_map[] = {
+ { "s3c2412-sdi", "rx-tx", (void *)DMACH_SDI },
+ { "s3c2412-spi.0", "rx", (void *)DMACH_SPI0_RX },
+ { "s3c2412-spi.0", "tx", (void *)DMACH_SPI0_TX },
+ { "s3c2412-spi.1", "rx", (void *)DMACH_SPI1_RX },
+ { "s3c2412-spi.1", "tx", (void *)DMACH_SPI1_TX },
+ { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+ { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+ { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+ { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+ { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+ { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+ { "s3c2412-iis", "rx", (void *)DMACH_I2S_IN },
+ { "s3c2412-iis", "tx", (void *)DMACH_I2S_OUT },
+ { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+ { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+ { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+ { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+ { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+ { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+ { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+ { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
.num_phy_channels = 4,
.channels = s3c2412_dma_channels,
.num_channels = DMACH_MAX,
+ .slave_map = s3c2412_dma_slave_map,
+ .slavecnt = ARRAY_SIZE(s3c2412_dma_slave_map),
};
struct platform_device s3c2412_device_dma = {
@@ -534,10 +590,30 @@ static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
[DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
};
+static const struct dma_slave_map s3c2443_dma_slave_map[] = {
+ { "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
+ { "s3c2443-spi.0", "rx", (void *)DMACH_SPI0_RX },
+ { "s3c2443-spi.0", "tx", (void *)DMACH_SPI0_TX },
+ { "s3c2443-spi.1", "rx", (void *)DMACH_SPI1_RX },
+ { "s3c2443-spi.1", "tx", (void *)DMACH_SPI1_TX },
+ { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+ { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+ { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+ { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+ { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+ { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+ { "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
+ { "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
+ { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+ { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+};
+
static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
.num_phy_channels = 6,
.channels = s3c2443_dma_channels,
.num_channels = DMACH_MAX,
+ .slave_map = s3c2443_dma_slave_map,
+ .slavecnt = ARRAY_SIZE(s3c2443_dma_slave_map),
};
struct platform_device s3c2443_device_dma = {
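The dma_slave_map tables added above give the s3c24xx dmaengine driver a way to resolve channel requests on these non-DT boards by (device name, channel name) pairs. On the consumer side this would be used roughly as follows (hypothetical fragment, not from this patch):

    /* e.g. in the "s3c2410-sdi" driver's probe(); needs <linux/dmaengine.h> */
    struct dma_chan *chan;

    chan = dma_request_chan(dev, "rx-tx");  /* matched against the slave map */
    if (IS_ERR(chan))
            return PTR_ERR(chan);

    /* ... dmaengine_slave_config(), prep and submit descriptors ... */

    dma_release_channel(chan);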
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 21b4b13c5ab7..7d69666de5ba 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -155,13 +155,6 @@ static const struct platform_suspend_ops s5pv210_suspend_ops = {
*/
static void s5pv210_pm_resume(void)
{
- u32 tmp;
-
- tmp = __raw_readl(S5P_OTHERS);
- tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF |\
- S5P_OTHERS_RET_MMC | S5P_OTHERS_RET_UART);
- __raw_writel(tmp , S5P_OTHERS);
-
s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
}
diff --git a/arch/arm/mach-s5pv210/regs-clock.h b/arch/arm/mach-s5pv210/regs-clock.h
index 4640f0f03c12..fb3eb77412db 100644
--- a/arch/arm/mach-s5pv210/regs-clock.h
+++ b/arch/arm/mach-s5pv210/regs-clock.h
@@ -188,10 +188,6 @@
#define S5P_SLEEP_CFG_USBOSC_EN (1 << 1)
/* OTHERS Register */
-#define S5P_OTHERS_RET_IO (1 << 31)
-#define S5P_OTHERS_RET_CF (1 << 30)
-#define S5P_OTHERS_RET_MMC (1 << 29)
-#define S5P_OTHERS_RET_UART (1 << 28)
#define S5P_OTHERS_USB_SIG_MASK (1 << 16)
/* S5P_DAC_CONTROL */
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index 8538910db202..a970e7fcba9e 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
*/
bool prcmu_is_cpu_in_wfi(int cpu)
{
- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
- PRCM_ARM_WFI_STANDBY_WFI0;
+ return readl(PRCM_ARM_WFI_STANDBY) &
+ (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
}
/*
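The ux500 change above fixes a precedence bug: '&' binds tighter than '?:', so the old expression evaluated (readl(...) & cpu) and then returned one of the WFI bit masks as a constant truth value instead of testing the per-CPU bit. A tiny stand-alone illustration (bit values invented for the demo):

    #include <stdio.h>

    #define WFI0 0x1        /* demo values only */
    #define WFI1 0x8

    int main(void)
    {
            unsigned int status = 0;    /* neither CPU in WFI */
            int cpu = 1;

            /* buggy: parses as (status & cpu) ? WFI1 : WFI0 -> 0x1, "true" */
            unsigned int buggy = status & cpu ? WFI1 : WFI0;

            /* fixed: mask with the per-CPU bit, as in the hunk above -> 0 */
            unsigned int fixed = status & (cpu ? WFI1 : WFI0);

            printf("buggy=%#x fixed=%#x\n", buggy, fixed);
            return 0;
    }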
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index fc033c0d2a0f..eada0b58ba1c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -356,5 +356,21 @@
status = "disabled";
};
};
+
+ vpu: vpu@d0100000 {
+ compatible = "amlogic,meson-gx-vpu";
+ reg = <0x0 0xd0100000 0x0 0x100000>,
+ <0x0 0xc883c000 0x0 0x1000>,
+ <0x0 0xc8838000 0x0 0x1000>;
+ reg-names = "vpu", "hhi", "dmc";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CVBS VDAC output port */
+ cvbs_vdac_port: port@0 {
+ reg = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 969682092e0f..4cbd626a9e88 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -142,6 +142,16 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
};
&uart_AO {
@@ -229,3 +239,9 @@
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 238fbeacd330..5d28e1cdc998 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,6 +137,10 @@
};
};
+&scpi_clocks {
+ status = "disabled";
+};
+
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 203be28978d5..4a96e0f6f926 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -125,6 +125,16 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
};
/* This UART is brought out to the DB9 connector */
@@ -234,3 +244,9 @@
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 51edd5b5c460..b35307321b63 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -55,7 +55,7 @@
mboxes = <&mailbox 1 &mailbox 2>;
shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
- clocks {
+ scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
scpi_dvfs: scpi_clocks@0 {
@@ -506,3 +506,7 @@
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
+
+&vpu {
+ compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
index e99101ae9664..cea4a3eded9b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
@@ -117,6 +117,16 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
};
&uart_AO {
@@ -203,3 +213,9 @@
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 9f89b99c4806..69216246275d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -43,7 +43,7 @@
#include "meson-gx.dtsi"
#include <dt-bindings/clock/gxbb-clkc.h>
-#include <dt-bindings/gpio/meson-gxbb-gpio.h>
+#include <dt-bindings/gpio/meson-gxl-gpio.h>
/ {
compatible = "amlogic,meson-gxl";
@@ -299,3 +299,7 @@
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
+
+&vpu {
+ compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index f859d75db8bd..5a337d339df1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -90,6 +90,16 @@
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
};
/* This UART is brought out to the DB9 connector */
@@ -167,3 +177,9 @@
max-speed = <1000>;
};
};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index c1974bbbddea..eb2f0c3e5e53 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -112,3 +112,7 @@
};
};
};
+
+&vpu {
+ compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu";
+};
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index a852e28a40e1..a83ed2c6bbf7 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -81,7 +81,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x0 0x2c001000 0 0x1000>,
- <0x0 0x2c002000 0 0x1000>,
+ <0x0 0x2c002000 0 0x2000>,
<0x0 0x2c004000 0 0x2000>,
<0x0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 64226d5ae471..135890cd8a85 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1367,7 +1367,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 9d1d7ad9b075..29ed6b61c737 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -64,6 +64,16 @@
reg = <0x0 0x86000000 0x0 0x200000>;
no-map;
};
+
+ memory@85800000 {
+ reg = <0x0 0x85800000 0x0 0x800000>;
+ no-map;
+ };
+
+ memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x2600000>;
+ no-map;
+ };
};
cpus {
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
index 6ffb0517421a..dbea2c3d8f0c 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
@@ -169,7 +169,7 @@
power-source = <3300>;
};
- sdhi0_pins_uhs: sd0 {
+ sdhi0_pins_uhs: sd0_uhs {
groups = "sdhi0_data4", "sdhi0_ctrl";
function = "sdhi0";
power-source = <1800>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 358089687a69..ef1b9e573af0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -27,7 +27,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68a908334c7b..54dc28351c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -72,7 +72,7 @@
<1 10 0xf08>;
};
- amba_apu {
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -175,7 +175,7 @@
};
i2c0: i2c@ff020000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@@ -185,7 +185,7 @@
};
i2c1: i2c@ff030000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 869dded0f09f..33b744d54739 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -331,6 +331,7 @@ CONFIG_DRM_VC4=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
CONFIG_BACKLIGHT_GENERIC=m
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644
index 000000000000..df411f3e083c
--- /dev/null
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,65 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+#include <asm/assembler.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ .macro __uaccess_ttbr0_disable, tmp1
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir
+ add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ isb
+ .endm
+
+ .macro __uaccess_ttbr0_enable, tmp1
+ get_thread_info \tmp1
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
+ msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
+ isb
+ .endm
+
+ .macro uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+ __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1
+ restore_irq \tmp2
+alternative_else_nop_endif
+ .endm
+#else
+ .macro uaccess_ttbr0_disable, tmp1
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+ .macro uaccess_disable_not_uao, tmp1
+ uaccess_ttbr0_disable \tmp1
+alternative_if ARM64_ALT_PAN_NOT_UAO
+ SET_PSTATE_PAN(1)
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_enable_not_uao, tmp1, tmp2
+ uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
+ SET_PSTATE_PAN(0)
+alternative_else_nop_endif
+ .endm
+
+#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 446f6c46d4b1..3a4301163e04 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -164,22 +164,25 @@ lr .req x30 // link register
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
- * @tmp: optional scratch register to be used if <dst> == sp, which
- * is not allowed in an adrp instruction
*/
- .macro adr_l, dst, sym, tmp=
- .ifb \tmp
+ .macro adr_l, dst, sym
+#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
- .else
- adrp \tmp, \sym
- add \dst, \tmp, :lo12:\sym
- .endif
+#else
+ movz \dst, #:abs_g3:\sym
+ movk \dst, #:abs_g2_nc:\sym
+ movk \dst, #:abs_g1_nc:\sym
+ movk \dst, #:abs_g0_nc:\sym
+#endif
.endm
/*
@@ -190,6 +193,7 @@ lr .req x30 // link register
* the address
*/
.macro ldr_l, dst, sym, tmp=
+#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req x30 // link register
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
+#else
+ .ifb \tmp
+ adr_l \dst, \sym
+ ldr \dst, [\dst]
+ .else
+ adr_l \tmp, \sym
+ ldr \dst, [\tmp]
+ .endif
+#endif
.endm
/*
@@ -206,8 +219,13 @@ lr .req x30 // link register
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
+#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
+#else
+ adr_l \tmp, \sym
+ str \src, [\tmp]
+#endif
.endm
/*
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index f2bcbe2d9889..86c404171305 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -9,9 +9,17 @@
struct task_struct;
+/*
+ * We don't use read_sysreg() as we want the compiler to cache the value where
+ * possible.
+ */
static __always_inline struct task_struct *get_current(void)
{
- return (struct task_struct *)read_sysreg(sp_el0);
+ unsigned long sp_el0;
+
+ asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+ return (struct task_struct *)sp_el0;
}
#define current get_current()
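The comment in the hunk above is the whole point of the change: a plain (non-volatile) asm with an output lets the compiler treat repeated reads of sp_el0 within a function as one read and keep the value in a register, whereas read_sysreg() uses asm volatile and forces a fresh mrs every time. A kernel-context sketch of the two forms (arm64 GNU C only; function names are illustrative):

	/* Non-volatile: identical invocations may be combined, so the
	 * task pointer can be cached in a register by the compiler. */
	static inline unsigned long sp_el0_cacheable(void)
	{
		unsigned long v;

		asm ("mrs %0, sp_el0" : "=r" (v));
		return v;
	}

	/* Volatile: every evaluation emits its own mrs instruction. */
	static inline unsigned long sp_el0_fresh(void)
	{
		unsigned long v;

		asm volatile ("mrs %0, sp_el0" : "=r" (v));
		return v;
	}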
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index d26750ca6e06..46da3ea638bb 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -22,8 +22,6 @@
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>
-#ifndef __ASSEMBLY__
-
/*
* User space memory access functions
*/
@@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
-#else /* __ASSEMBLY__ */
-
-#include <asm/assembler.h>
-
-/*
- * User access enabling/disabling macros.
- */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- .macro __uaccess_ttbr0_disable, tmp1
- mrs \tmp1, ttbr1_el1 // swapper_pg_dir
- add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
- msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
- isb
- .endm
-
- .macro __uaccess_ttbr0_enable, tmp1
- get_thread_info \tmp1
- ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
- msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
- isb
- .endm
-
- .macro uaccess_ttbr0_disable, tmp1
-alternative_if_not ARM64_HAS_PAN
- __uaccess_ttbr0_disable \tmp1
-alternative_else_nop_endif
- .endm
-
- .macro uaccess_ttbr0_enable, tmp1, tmp2
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- __uaccess_ttbr0_enable \tmp1
- restore_irq \tmp2
-alternative_else_nop_endif
- .endm
-#else
- .macro uaccess_ttbr0_disable, tmp1
- .endm
-
- .macro uaccess_ttbr0_enable, tmp1, tmp2
- .endm
-#endif
-
-/*
- * These macros are no-ops when UAO is present.
- */
- .macro uaccess_disable_not_uao, tmp1
- uaccess_ttbr0_disable \tmp1
-alternative_if ARM64_ALT_PAN_NOT_UAO
- SET_PSTATE_PAN(1)
-alternative_else_nop_endif
- .endm
-
- .macro uaccess_enable_not_uao, tmp1, tmp2
- uaccess_ttbr0_enable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
- SET_PSTATE_PAN(0)
-alternative_else_nop_endif
- .endm
-
-#endif /* __ASSEMBLY__ */
-
#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index fea10736b11f..439f6b5d31f6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -47,6 +47,7 @@
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
+#include <asm/cpufeature.h>
/*
* __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
+static inline bool has_vhe(void)
+{
+ if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_ARM64_VHE
extern void verify_cpu_run_el(void);
#else
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
__uint128_t vregs[32];
__u32 fpsr;
__u32 fpcr;
+ __u32 __reserved[2];
};
struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7504f40d7ee..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -31,7 +31,7 @@
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
@@ -683,7 +683,7 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
- bl bad_mode
+ bl bad_el0_sync
b ret_to_user
ENDPROC(el0_sync)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
/* (address, ctrl) registers */
limit = regset->n * regset->size;
while (count && offset < limit) {
+ if (count < PTRACE_HBP_ADDR_SZ)
+ return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
offset, offset + PTRACE_HBP_ADDR_SZ);
if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
return ret;
offset += PTRACE_HBP_ADDR_SZ;
+ if (!count)
+ break;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
offset, offset + PTRACE_HBP_CTRL_SZ);
if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_pt_regs newregs;
+ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_fpsimd_state newstate;
+ struct user_fpsimd_state newstate =
+ target->thread.fpsimd_state.user_fpsimd;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- unsigned long tls;
+ unsigned long tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int syscallno, ret;
+ int syscallno = task_pt_regs(target)->syscallno;
+ int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf)
{
int ret;
- compat_ulong_t tls;
+ compat_ulong_t tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
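The gpr/fpr/tls/syscall changes above all follow one pattern: seed the local copy from the task's current state before a possibly partial user_regset_copyin(), so any fields the caller does not write keep their old values instead of uninitialized stack contents. A userspace-sized illustration of that pattern (structure and names are made up):

	#include <stdio.h>
	#include <string.h>

	struct regs { unsigned long r[4]; };

	static struct regs task_regs = { { 1, 2, 3, 4 } };

	/* Partial write of the regset: only 'count' bytes are supplied. */
	static void set_regs(const void *ubuf, size_t count)
	{
		struct regs newregs = task_regs;	/* seed from current state */

		memcpy(&newregs, ubuf, count);		/* partial copy-in */
		task_regs = newregs;
	}

	int main(void)
	{
		unsigned long two[2] = { 42, 43 };

		set_regs(two, sizeof(two));
		printf("%lu %lu %lu %lu\n",
		       task_regs.r[0], task_regs.r[1],
		       task_regs.r[2], task_regs.r[3]);
		/* prints 42 43 3 4 -- the untouched registers are preserved */
		return 0;
	}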
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 23e9e13bd2aa..655e65f38f31 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,6 +11,7 @@
* for more details.
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
static int __init register_cpufreq_notifier(void)
{
- if (cap_parsing_failed)
+ /*
+ * On ACPI-based systems we need to use the default cpu capacity
+ * until we have the necessary code to parse the cpu capacity, so
+ * skip registering the cpufreq notifier.
+ */
+ if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
}
/*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
- siginfo_t info;
- void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+ siginfo_t info;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+ console_verbose();
+
+ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
- arm64_notify_die("Oops - bad mode", regs, &info, 0);
+ current->thread.fault_address = 0;
+ current->thread.fault_code = 0;
+
+ force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index add4a1334085..e88fb99c1561 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,7 +17,7 @@
*/
#include <linux/linkage.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
.text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index fd6cd05593f9..4b5d826895ff 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -17,7 +17,7 @@
#include <linux/linkage.h>
#include <asm/cache.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
/*
* Copy from user space to a kernel buffer (alignment handled by the hardware)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index d828540ded6f..47184c3a97da 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -19,7 +19,7 @@
#include <linux/linkage.h>
#include <asm/cache.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
/*
* Copy from user space to user space (alignment handled by the hardware)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 3e6ae2663b82..351f0766f7a6 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -17,7 +17,7 @@
#include <linux/linkage.h>
#include <asm/cache.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
/*
* Copy to user space from a kernel buffer (alignment handled by the hardware)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 17f422a4dc55..83c27b6e6dca 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,7 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
/*
* flush_icache_range(start,end)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 290a84f3351f..e04082700bb1 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
- if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb = 1;
return atomic_pool_init();
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a78a5c401806..156169c6981b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -88,21 +88,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pud = pud_offset(pgd, addr);
- printk(", *pud=%016llx", pud_val(*pud));
+ pr_cont(", *pud=%016llx", pud_val(*pud));
if (pud_none(*pud) || pud_bad(*pud))
break;
pmd = pmd_offset(pud, addr);
- printk(", *pmd=%016llx", pmd_val(*pmd));
+ pr_cont(", *pmd=%016llx", pmd_val(*pmd));
if (pmd_none(*pmd) || pmd_bad(*pmd))
break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%016llx", pte_val(*pte));
+ pr_cont(", *pte=%016llx", pte_val(*pte));
pte_unmap(pte);
} while(0);
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_ARM64_HW_AFDBM
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 964b7549af5c..e25584d72396 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
- changed = ptep_set_access_flags(vma, addr, cpte,
+ changed |= ptep_set_access_flags(vma, addr, cpte,
pfn_pte(pfn,
hugeprot),
dirty);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 212c4d1e2f26..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -401,8 +401,11 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
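Both swiotlb hunks replace a truth test with an explicit comparison because swiotlb_force is now a three-state value (normal / force / no-force); a plain "if (swiotlb_force)" would also fire for the new no-force state. A tiny standalone illustration, with enum names local to the example rather than the kernel's:

	#include <stdio.h>

	enum force_mode { MODE_NORMAL, MODE_FORCE, MODE_NO_FORCE };

	int main(void)
	{
		enum force_mode swiotlb_force = MODE_NO_FORCE;

		if (swiotlb_force)			/* old test: true for NO_FORCE too */
			puts("old check would still enable the bounce buffer");
		if (swiotlb_force == MODE_FORCE)	/* the check the patches switch to */
			puts("new check fires only when explicitly forced");
		return 0;
	}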
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 47cf3f9d89ff..947830a459d2 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,7 +49,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <linux/uaccess.h>
+#include <asm/asm-uaccess.h>
#include <xen/interface/xen.h>
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc7..e93c9494503a 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+ long long c, old;
+
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + i);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
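For readers unfamiliar with the retry loop used by the new frv atomic64_add_unless()/atomic64_dec_if_positive() helpers above, the same compare-and-swap pattern can be written with standard C11 atomics. A self-contained sketch of the idea (not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Add 'i' to *v unless *v equals 'u'; returns true if the add happened.
	 * On failure, atomic_compare_exchange_weak refreshes 'c' with the
	 * current value, so the loop simply retries with up-to-date data. */
	static bool add_unless(_Atomic long long *v, long long i, long long u)
	{
		long long c = atomic_load(v);

		while (c != u) {
			if (atomic_compare_exchange_weak(v, &c, c + i))
				return true;
		}
		return false;
	}

	int main(void)
	{
		_Atomic long long counter = 3;

		printf("%d\n", add_unless(&counter, 1, 0));	/* 1: counter -> 4 */
		printf("%d\n", add_unless(&counter, 1, 4));	/* 0: left at 4 */
		printf("%lld\n", atomic_load(&counter));
		return 0;
	}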
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 6a02b3a3fa65..e92fb190e2d6 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -521,6 +521,9 @@ void *kvm_mips_build_exit(void *addr)
uasm_i_and(&p, V0, V0, AT);
uasm_i_lui(&p, AT, ST0_CU0 >> 16);
uasm_i_or(&p, V0, V0, AT);
+#ifdef CONFIG_64BIT
+ uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
+#endif
uasm_i_mtc0(&p, V0, C0_STATUS);
uasm_i_ehb(&p);
@@ -643,7 +646,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
/* Setup status register for running guest in UM */
uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
- UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
+ UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
uasm_i_and(&p, V1, V1, AT);
uasm_i_mtc0(&p, V1, C0_STATUS);
uasm_i_ehb(&p);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 06a60b19acfb..29ec9ab3fd55 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -360,8 +360,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
/* Invalidate the icache for these ranges */
- local_flush_icache_range((unsigned long)gebase,
- (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+ flush_icache_range((unsigned long)gebase,
+ (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
/*
* Allocate comm page for guest kernel, a TLB will be reserved for
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8..67e333aa7629 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
struct task_struct;
struct thread_struct;
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next) \
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index ef31fc24344e..552544616b9d 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -44,6 +44,8 @@ SECTIONS
/* Read-only sections, merged into text segment: */
. = LOAD_BASE ;
+ _text = .;
+
/* _s_kernel_ro must be page aligned */
. = ALIGN(PAGE_SIZE);
_s_kernel_ro = .;
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 3f9406d9b9d6..da87943328a5 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -6,7 +6,7 @@
#endif
#include <linux/compiler.h>
-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
@@ -17,6 +17,12 @@
* to include/asm-i386/bitops.h or kerneldoc
*/
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 7581330ea35b..88fe0aad4390 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -49,7 +49,6 @@ struct thread_info {
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9 /* single stepping? */
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index e0a23c7bdd43..07fa7e50bdc0 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -3,10 +3,8 @@
#if defined(__LP64__)
#define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
#endif
#include <asm-generic/bitsperlong.h>
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
index e78403b129ef..928e1bbac98f 100644
--- a/arch/parisc/include/uapi/asm/swab.h
+++ b/arch/parisc/include/uapi/asm/swab.h
@@ -1,6 +1,7 @@
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H
+#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
#endif /* _PARISC_SWAB_H */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index da0d9cb63403..1e22f981cd81 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -235,9 +235,26 @@ void __init time_init(void)
cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
- /* register at clocksource framework */
- clocksource_register_hz(&clocksource_cr16, cr16_hz);
-
/* register as sched_clock source */
sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
+
+static int __init init_cr16_clocksource(void)
+{
+ /*
+ * The cr16 interval timers are not synchronized across CPUs, so mark
+ * them unstable and lower their rating on SMP systems.
+ */
+ if (num_online_cpus() > 1) {
+ clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+ clocksource_cr16.rating = 0;
+ }
+
+ /* register at clocksource framework */
+ clocksource_register_hz(&clocksource_cr16,
+ 100 * PAGE0->mem_10msec);
+
+ return 0;
+}
+
+device_initcall(init_cr16_clocksource);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 8ff9253930af..1a0b4f63f0e9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
tsk->comm, code, address);
print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
- pr_cont(" trap #%lu: %s%c", code, trap_name(code),
+ pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
vma ? ',':'\n');
if (vma)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1c64bc6330bc..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -36,12 +36,13 @@
#ifdef CONFIG_HUGETLB_PAGE
static inline int hash__hugepd_ok(hugepd_t hpd)
{
+ unsigned long hpdval = hpd_val(hpd);
/*
* if it is not a pte and have hugepd shift mask
* set, then it is a hugepd directory pointer
*/
- if (!(hpd.pd & _PAGE_PTE) &&
- ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+ if (!(hpdval & _PAGE_PTE) &&
+ ((hpdval & HUGEPD_SHIFT_MASK) != 0))
return true;
return false;
}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3de4e6..4c935f7504f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ede215167d1a..7f4025a6c69e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
* We have only four bits to encode, MMU page size
*/
BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
- return __va(hpd.pd & HUGEPD_ADDR_MASK);
+ return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
- return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+ return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
- return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+ return (pte_t *)__va(hpd_val(hpd) &
+ ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
- return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+ return (pte_t *)((hpd_val(hpd) &
+ ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
- return hpd.pd & HUGEPD_SHIFT_MASK;
+ return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 172849727054..0cd8a3852763 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & 0x4) != 0);
+ return ((hpd_val(hpd) & 0x4) != 0);
#else
- return (hpd.pd > 0);
+ /* We clear the top bit to indicate hugepd */
+ return ((hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7e6100..47120bf2670c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
#include <asm/pgtable-types.h>
#endif
-typedef struct { signed long pd; } hugepd_t;
#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep) (0)
#define pgd_huge(pgd) (0)
#endif /* CONFIG_HUGETLB_PAGE */
-#define __hugepd(x) ((hugepd_t) { (x) })
-
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index e157489ee7a1..ae0a23091a9b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -65,6 +65,7 @@ struct power_pmu {
#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a80efa..9c0f5db5cf46 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
return pmd_raw(old) == prev;
}
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return be64_to_cpu(x.pdbe);
+}
+
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e0fcde..8bd3b13fe2fb 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
}
#endif
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return x.pd;
+}
+
#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c56ea8c84abb..c4ced1d01d57 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -157,7 +157,7 @@
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGCLR 0x7c0001dc
@@ -174,13 +174,13 @@
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
-#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
-#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MFVSRD 0x7c000066
#define PPC_INST_MTVSRD 0x7c000166
#define PPC_INST_SLBFEE 0x7c0007a7
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8180bfd7ab93..9de7f79e702b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFF's is always returned from PCI config space.
+ *
+ * When the @severity is EEH_LOG_PERM, the PE is going to be
+ * removed. Prior to that, the drivers for devices included in
+ * the PE will be closed. The drivers rely on a working IO path
+ * to bring the devices to a quiet state. Otherwise, PCI traffic
+ * from those devices after they are removed is likely to cause
+ * another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+ severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e4744ff38a17..925a4ef90559 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
flush_fp_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
+ for (i = 0; i < 32; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 80334937e14f..67e19a0821be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_bolt_mapping(start, end, __pa(start),
pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
return rc;
}
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_remove_mapping(start, end, mmu_linear_psize,
mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3800b6..37b5f91e381b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
int hugepd_ok(hugepd_t hpd)
{
bool is_hugepd;
+ unsigned long hpdval;
+
+ hpdval = hpd_val(hpd);
/*
* We should not find this format in page directory, warn otherwise.
*/
- is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+ is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
WARN(is_hugepd, "Found wrong page directory format\n");
return 0;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38fb7e0..8c3389cbcd12 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
-#define hugepd_none(hpd) ((hpd).pd == 0)
+#define hugepd_none(hpd) (hpd_val(hpd) == 0)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
for (i = 0; i < num_hugepd; i++, hpdp++) {
if (unlikely(!hugepd_none(*hpdp)))
break;
- else
+ else {
#ifdef CONFIG_PPC_BOOK3S_64
- hpdp->pd = __pa(new) |
- (shift_to_mmu_psize(pshift) << 2);
+ *hpdp = __hugepd(__pa(new) |
+ (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
- hpdp->pd = __pa(new) |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
- _PMD_PAGE_512K) |
- _PMD_PRESENT;
+ *hpdp = __hugepd(__pa(new) |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+ _PMD_PAGE_512K) | _PMD_PRESENT);
#else
/* We use the old format for PPC_FSL_BOOK3E */
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
+ }
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
kmem_cache_free(cachep, new);
}
spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
return;
for (i = 0; i < num_hugepd; i++, hpdp++)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
* if we have pdshift and shift value same, we don't
* use pgt cache for hugepd.
*/
- if (pdshift > shift) {
+ if (pdshift > shift)
pgtable_cache_add(pdshift - shift, NULL);
- if (!PGT_CACHE(pdshift - shift))
- panic("hugetlbpage_init(): could not create "
- "pgtable cache for %d bit pagesize\n", shift);
- }
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
else if (!hugepte_cache) {
/*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
else if (mmu_psize_defs[MMU_PAGE_2M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
- else
- panic("%s: Unable to set default huge page size\n", __func__);
-
return 0;
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a175cd82ae8c..f2108c40e697 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
+ if (!new)
+ panic("Could not allocate pgtable cache for order %d", shift);
+
kfree(name);
pgtable_cache[shift - 1] = new;
+
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
{
pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
- if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
/*
* In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
*/
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
- if (!PGT_CACHE(PGD_INDEX_SIZE))
- panic("Couldn't allocate pgd cache");
- if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
- panic("Couldn't allocate pmd pgtable caches");
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- panic("Couldn't allocate pud pgtable caches");
}
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782bacf9..653ff6c74ebe 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
else if (mmu_hash_ops.hpte_clear_all)
mmu_hash_ops.hpte_clear_all();
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return -ENODEV;
+
+ return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return -ENODEV;
+
+ return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fd3e4034c04d..270eb9b74e2e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
*/
if (TRAP(regs) != 0xf00)
use_siar = 0;
+ else if ((ppmu->flags & PPMU_NO_SIAR))
+ use_siar = 0;
else if (marked)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 6447dc1c3d89..929b56d47ad9 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
EVENT(PM_CMPLU_STALL, 0x1e054)
EVENT(PM_INST_CMPL, 0x00002)
-EVENT(PM_BRU_CMPL, 0x40060)
+EVENT(PM_BRU_CMPL, 0x10012)
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* All L1 D cache load references counted at finish, gated by reject */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 346010e8d463..7332634e18c9 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
.bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.disable_pmc = isa207_disable_pmc,
- .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events),
.generic_events = power9_generic_events,
.cache_events = &power9_cache_events,
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86fd5720..60c57657c772 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
static void icp_opal_teardown_cpu(void)
{
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
* Should we be flagging idle loop instead?
* Or creating some task to be scheduled?
*/
- opal_int_eoi((0x00 << 24) | XICS_IPI);
+ if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+ force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+ unsigned int kvm_xirr;
+ __be32 hw_xirr;
+ int64_t rc;
+
+ /* Handle an interrupt latched by KVM first */
+ kvm_xirr = kvmppc_get_xics_latch();
+ if (kvm_xirr)
+ return kvm_xirr;
+
+ /* Then ask OPAL */
+ rc = opal_int_get_xirr(&hw_xirr, false);
+ if (rc < 0)
+ return 0;
+ return be32_to_cpu(hw_xirr);
}
static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
unsigned int xirr;
unsigned int vec;
unsigned int irq;
- int64_t rc;
- rc = opal_int_get_xirr(&xirr, false);
- if (rc < 0)
- return 0;
- xirr = be32_to_cpu(xirr);
+ xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
xics_mask_unknown_vec(vec);
/* We might learn about it later, so EOI it */
- opal_int_eoi(xirr);
+ if (opal_int_eoi(xirr) > 0)
+ force_external_irq_replay();
return 0;
}
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e659daffe368..e00975361fec 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_IMA=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 95ceac50bc65..f05d2d6e1087 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index bc7b176f5795..2cf87343b590 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0a6295..d00e368fb5e6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..2c3413b0ca52
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
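The new asm-prototypes.h is, as far as can be told from context, there so that genksyms has C prototypes for symbols exported from assembly when computing modversion CRCs. Note that the guard above only tests _ASM_S390_PROTOTYPES_H without defining it; a conventional double-inclusion guard, sketched here with purely illustrative names, would also define the macro:

#ifndef _EXAMPLE_PROTOTYPES_H
#define _EXAMPLE_PROTOTYPES_H   /* define the guard so a second include is a no-op */

/* prototypes for symbols defined in assembly would go here */
void example_asm_helper(void);

#endif /* _EXAMPLE_PROTOTYPES_H */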
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
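The added "memory" clobber tells the compiler the asm statement may read or modify arbitrary memory, which turns __ctl_load into a compiler-level memory barrier: stores that must be visible before the control registers are loaded cannot be reordered past, or elided around, the asm. A minimal, architecture-neutral GCC sketch of the same idea (function and variable names are illustrative, not from the patch):

#include <stdio.h>

static int shared_flag;

static inline void compiler_barrier(void)
{
        /* The "memory" clobber forces GCC to flush register-cached values and
         * to keep memory accesses on either side of this statement in order. */
        asm volatile("" : : : "memory");
}

int main(void)
{
        shared_flag = 1;        /* must reach memory before the barrier */
        compiler_barrier();
        printf("%d\n", shared_flag);
        return 0;
}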
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7447ba509c30..12020b55887b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ else
+ memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 6b246aadf311..1b5c5ee9fc1b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -94,7 +94,7 @@ static void update_mt_scaling(void)
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk)
{
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;
@@ -138,7 +138,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
}
account_user_time(tsk, user);
tsk->utimescaled += user_scaled;
- account_system_time(tsk, hardirq_offset, system);
+ account_system_time(tsk, 0, system);
tsk->stimescaled += system_scaled;
steal = S390_lowcore.steal_timer;
@@ -152,7 +152,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
void vtime_task_switch(struct task_struct *prev)
{
- do_account_vtime(prev, 0);
+ do_account_vtime(prev);
prev->thread.user_timer = S390_lowcore.user_timer;
prev->thread.system_timer = S390_lowcore.system_timer;
S390_lowcore.user_timer = current->thread.user_timer;
@@ -166,7 +166,7 @@ void vtime_task_switch(struct task_struct *prev)
*/
void vtime_account_user(struct task_struct *tsk)
{
- if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+ if (do_account_vtime(tsk))
virt_timer_expire();
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bec71e902be3..6484a250021e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..d56ef26d4681 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
return pgste;
}
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
} else {
*ptep = new;
}
+ return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b7011667c..e279572824b1 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs regs;
+ struct pt_regs regs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index cc3bd583dce1..9e240fcba784 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "ctype.h"
+#include "string.h"
int memcmp(const void *s1, const void *s2, size_t len)
{
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820602b1..113588ddb43f 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+ unsigned int base);
+
#endif /* BOOT_STRING_H */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 31c34ee131f3..6ef688a1ef3e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1020,7 +1020,8 @@ struct {
const char *basename;
struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
+ IS_BUILTIN(CONFIG_CRYPTO_PCBC)
{
.algname = "pcbc(aes)",
.drvname = "pcbc-aes-aesni",
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 701d29f8e4d3..57f7ec35216e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -255,23 +255,6 @@ ENTRY(__switch_to_asm)
END(__switch_to_asm)
/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack. This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-ENTRY(schedule_tail_wrapper)
- FRAME_BEGIN
-
- pushl %eax
- call schedule_tail
- popl %eax
-
- FRAME_END
- ret
-ENDPROC(schedule_tail_wrapper)
-/*
* A newly forked process directly context switches into this address.
*
* eax: prev task we switched from
@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
* edi: kernel thread arg
*/
ENTRY(ret_from_fork)
- call schedule_tail_wrapper
+ FRAME_BEGIN /* help unwinder find end of stack */
+
+ /*
+ * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+ * on the stack.
+ */
+ pushl %eax
+ call schedule_tail
+ popl %eax
testl %ebx, %ebx
jnz 1f /* kernel threads are uncommon */
2:
/* When we fork, we trace the syscall return in the child, too. */
- movl %esp, %eax
+ leal FRAME_OFFSET(%esp), %eax
call syscall_return_slowpath
+ FRAME_END
jmp restore_all
/* kernel thread */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5b219707c2f2..044d18ebc43c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
+#include <asm/frame.h>
#include <linux/err.h>
.code64
@@ -408,17 +409,19 @@ END(__switch_to_asm)
* r12: kernel thread arg
*/
ENTRY(ret_from_fork)
+ FRAME_BEGIN /* help unwinder find end of stack */
movq %rax, %rdi
- call schedule_tail /* rdi: 'prev' task parameter */
+ call schedule_tail /* rdi: 'prev' task parameter */
- testq %rbx, %rbx /* from kernel_thread? */
- jnz 1f /* kernel threads are uncommon */
+ testq %rbx, %rbx /* from kernel_thread? */
+ jnz 1f /* kernel threads are uncommon */
2:
- movq %rsp, %rdi
+ leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
SWAPGS
+ FRAME_END
jmp restore_regs_and_iret
1:
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 05612a2529c8..496e60391fac 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
* all online cpus.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- "perf/x86/amd/ibs:STARTING",
+ "perf/x86/amd/ibs:starting",
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 019c5887b698..1635c0c8df23 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
+
+ /* There's no sense in having PEBS for non sampling events: */
+ if (!is_sampling_event(event))
+ return -EINVAL;
}
/*
* check that PEBS LBR correction does not conflict with
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 86138267b68a..eb1484c86bb4 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+ struct cpu_hw_events *sibling;
struct intel_excl_cntrs *c;
- c = per_cpu(cpu_hw_events, i).excl_cntrs;
+ sibling = &per_cpu(cpu_hw_events, i);
+ c = sibling->excl_cntrs;
if (c && c->core_id == core_id) {
cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
cpuc->excl_cntrs = c;
- cpuc->excl_thread_id = 1;
+ if (!sibling->excl_thread_id)
+ cpuc->excl_thread_id = 1;
break;
}
}
@@ -3987,7 +3990,7 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
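The intel_ctrl change above matters because the enable mask is 64 bits wide: with 32 general-purpose counters, (1 << num_counters) shifts a 32-bit int by its full width, which is undefined behaviour in C, so the mask has to be built with a 64-bit shift. A standalone sketch of the difference:

#include <stdint.h>
#include <stdio.h>

/* Build an n-bit counter mask; n may legitimately reach 32 or more. */
static uint64_t counter_mask(unsigned int n)
{
        return (1ULL << n) - 1;         /* 64-bit shift: defined for n < 64 */
        /* return (1 << n) - 1; */      /* 32-bit shift: undefined once n >= 32 */
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)counter_mask(48));
        return 0;
}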
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index fec8a461bdef..1076c9a77292 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .module = THIS_MODULE,
};
static struct pmu cstate_pkg_pmu = {
@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .module = THIS_MODULE,
};
static const struct cstate_model nhm_cstates __initconst = {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be202390bbd3..9dfeeeca0ea8 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
continue;
/* log dropped samples number */
- if (error[bit])
+ if (error[bit]) {
perf_log_lost_samples(event, error[bit]);
+ if (perf_event_account_interrupt(event))
+ x86_pmu_stop(event, 0);
+ }
+
if (counts[bit]) {
__intel_pmu_pebs_event(event, iregs, base,
top, bit, counts[bit]);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index bd34124449b0..17c3564d087a 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -697,6 +697,7 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.start = rapl_pmu_event_start;
rapl_pmus->pmu.stop = rapl_pmu_event_stop;
rapl_pmus->pmu.read = rapl_pmu_event_read;
+ rapl_pmus->pmu.module = THIS_MODULE;
return 0;
}
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 97c246f84dea..8c4ccdc3a3f3 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -733,6 +733,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
.start = uncore_pmu_event_start,
.stop = uncore_pmu_event_stop,
.read = uncore_pmu_event_read,
+ .module = THIS_MODULE,
};
} else {
pmu->pmu = *pmu->type->pmu;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index e6832be714bc..dae2fedc1601 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
void hswep_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(0);
+ int pkg = boot_cpu_data.logical_proc_id;
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 68557f52b961..854022772c5b 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
+static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+ bool negative;
+ asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
+ CC_SET(s)
+ : CC_OUT(s) (negative), ADDR
+ : "ir" ((char) ~(1 << nr)) : "memory");
+ return negative;
+}
+
+// Let everybody know we have it
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+
/*
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
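clear_bit_unlock_is_negative_byte() relies on andb setting the sign flag (captured via CC_SET(s)/CC_OUT(s)), so clearing a low bit and testing bit 7 of the same byte happen in a single locked instruction. A portable sketch of the intended semantics, using GCC atomic builtins instead of the x86 asm (the function name and the release ordering are assumptions):

#include <stdbool.h>

/* Clear bit 'nr' (nr < 8) in *byte and report whether bit 7 of that byte is
 * still set afterwards, i.e. whether the byte is "negative". */
static bool clear_bit_is_negative_byte(volatile unsigned char *byte, unsigned int nr)
{
        unsigned char old = __atomic_fetch_and(byte, (unsigned char)~(1u << nr),
                                               __ATOMIC_RELEASE);
        return ((old & ~(1u << nr)) & 0x80) != 0;
}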
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 34a46dc076d3..8167fdb67ae8 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -57,7 +57,7 @@
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 195becc6f780..e793fc9a9b20 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,6 +52,21 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+static inline u32 intel_get_microcode_revision(void)
+{
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+ /* As documented in the SDM: Do a CPUID 1 here */
+ native_cpuid_eax(1);
+
+ /* get the current revision from MSR 0x8B */
+ native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+ return rev;
+}
+
#ifdef CONFIG_MICROCODE_INTEL
extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index eaf100508c36..1be64da0384e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -219,6 +219,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
: "memory");
}
+#define native_cpuid_reg(reg) \
+static inline unsigned int native_cpuid_##reg(unsigned int op) \
+{ \
+ unsigned int eax = op, ebx, ecx = 0, edx; \
+ \
+ native_cpuid(&eax, &ebx, &ecx, &edx); \
+ \
+ return reg; \
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
static inline void load_cr3(pgd_t *pgdir)
{
write_cr3(__pa(pgdir));
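native_cpuid_reg() stamps out one helper per CPUID output register; the new intel_get_microcode_revision() above uses native_cpuid_eax(1) for the CPUID the SDM requires before reading MSR 0x8B. For reference, the generated eax variant expands to roughly:

static inline unsigned int native_cpuid_eax(unsigned int op)
{
        unsigned int eax = op, ebx, ecx = 0, edx;

        native_cpuid(&eax, &ebx, &ecx, &edx);

        return eax;
}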
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index a3269c897ec5..2e41c50ddf47 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
if (task == current)
return __builtin_frame_address(0);
- return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+ return &((struct inactive_task_frame *)task->thread.sp)->bp;
}
#else
static inline unsigned long *
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436acd463..fcc5cd387fd1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,
asmlinkage void ret_from_fork(void);
-/* data that is pointed to by thread.sp */
+/*
+ * This is the structure pointed to by thread.sp for an inactive task. The
+ * order of the fields must match the code in __switch_to_asm().
+ */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
unsigned long r15;
@@ -48,6 +51,11 @@ struct inactive_task_frame {
unsigned long di;
#endif
unsigned long bx;
+
+ /*
+ * These two fields must be together. They form a stack frame header,
+ * needed by get_frame_pointer().
+ */
unsigned long bp;
unsigned long ret_addr;
};
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 945e512a112a..1e35dd06b090 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 71cae73a5076..1d3167269a67 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
/* get information required for multi-node processors */
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
- u32 eax, ebx, ecx, edx;
- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- node_id = ecx & 7;
-
- /* get compute unit information */
- smp_num_siblings = ((ebx >> 8) & 3) + 1;
- c->x86_max_cores /= smp_num_siblings;
- c->cpu_core_id = ebx & 0xff;
+ node_id = cpuid_ecx(0x8000001e) & 7;
/*
* We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index dc1697ca5191..9bab7a8a4293 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;
- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fcd484d2bb03..203f860d2ab3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
- unsigned lower_word;
-
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- /* Required by the SDM */
- sync_core();
- rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
- }
+ if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+ c->microcode = intel_get_microcode_revision();
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ffacfdcacb85..a5fd137417a2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
const char *name = get_name(bank, NULL);
int err = 0;
+ if (!dev)
+ return -ENODEV;
+
if (is_shared_bank(bank)) {
nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index b624b54912e1..3f329b74e040 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
{
struct ucode_patch *p;
- p = kzalloc(size, GFP_KERNEL);
+ p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -368,26 +368,6 @@ next:
return patch;
}
-static void cpuid_1(void)
-{
- /*
- * According to the Intel SDM, Volume 3, 9.11.7:
- *
- * CPUID returns a value in a model specific register in
- * addition to its usual register return values. The
- * semantics of CPUID cause it to deposit an update ID value
- * in the 64-bit model-specific register at address 08BH
- * (IA32_BIOS_SIGN_ID). If no update is present in the
- * processor, the value in the MSR remains unmodified.
- *
- * Use native_cpuid -- this code runs very early and we don't
- * want to mess with paravirt.
- */
- unsigned int eax = 1, ebx, ecx = 0, edx;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-}
-
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
csig.pf = 1 << ((val[1] >> 18) & 7);
}
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
- csig.rev = val[1];
+ csig.rev = intel_get_microcode_revision();
uci->cpu_sig = csig;
uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
struct microcode_intel *mc;
- unsigned int val[2];
+ u32 rev;
mc = uci->mc;
if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
-
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
- if (val[1] != mc->hdr.rev)
+ rev = intel_get_microcode_revision();
+ if (rev != mc->hdr.rev)
return -1;
#ifdef CONFIG_X86_64
/* Flush global tlb. This is precaution. */
flush_tlb_early();
#endif
- uci->cpu_sig.rev = val[1];
+ uci->cpu_sig.rev = rev;
if (early)
print_ucode(uci);
@@ -804,8 +772,8 @@ static int apply_microcode_intel(int cpu)
struct microcode_intel *mc;
struct ucode_cpu_info *uci;
struct cpuinfo_x86 *c;
- unsigned int val[2];
static int prev_rev;
+ u32 rev;
/* We should bind the task to the CPU */
if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +790,28 @@ static int apply_microcode_intel(int cpu)
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
- /* get the current revision from MSR 0x8B */
- rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+ rev = intel_get_microcode_revision();
- if (val[1] != mc->hdr.rev) {
+ if (rev != mc->hdr.rev) {
pr_err("CPU%d update to revision 0x%x failed\n",
cpu, mc->hdr.rev);
return -1;
}
- if (val[1] != prev_rev) {
+ if (rev != prev_rev) {
pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- val[1],
+ rev,
mc->hdr.date & 0xffff,
mc->hdr.date >> 24,
(mc->hdr.date >> 16) & 0xff);
- prev_rev = val[1];
+ prev_rev = rev;
}
c = &cpu_data(cpu);
- uci->cpu_sig.rev = val[1];
- c->microcode = val[1];
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
return 0;
}
@@ -860,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
- unsigned int curr_mc_size = 0;
+ unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
while (leftover) {
@@ -901,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
+ new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
}
@@ -926,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc, curr_mc_size);
+ save_mc_for_early(new_mc, new_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index b47edb8f5256..410efb2c7b80 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -68,12 +68,10 @@ static struct dma_map_ops swiotlb_dma_ops = {
*/
int __init pci_swiotlb_detect_override(void)
{
- int use_swiotlb = swiotlb | swiotlb_force;
-
- if (swiotlb_force)
+ if (swiotlb_force == SWIOTLB_FORCE)
swiotlb = 1;
- return use_swiotlb;
+ return swiotlb;
}
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
pci_xen_swiotlb_detect,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index be3a49ee0356..e41af597aed8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
crystal_khz = 24000; /* 24.0 MHz */
break;
case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 4443e499f279..23d15565d02a 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,21 @@
#define FRAME_HEADER_SIZE (sizeof(long) * 2)
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+
static void unwind_dump(struct unwind_state *state, unsigned long *sp)
{
static bool dumped_before = false;
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
if (state->regs && user_mode(state->regs))
return 0;
- addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+ addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+ addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
addr_p);
return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (state->regs)
next_bp = (unsigned long *)state->regs->bp;
else
- next_bp = (unsigned long *)*state->bp;
+ next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
/* is the next frame pointer an encoded pointer to pt_regs? */
regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
return true;
bad_address:
+ /*
+ * When unwinding a non-current task, the task might actually be
+ * running on another CPU, in which case it could be modifying its
+ * stack while we're reading it. This is generally not a problem and
+ * can be ignored as long as the caller understands that unwinding
+ * another task will not always succeed.
+ */
+ if (state->task != current)
+ goto the_end;
+
if (state->regs) {
printk_deferred_once(KERN_WARNING
"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56628a44668b..cedbba0f3402 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ void *data,
+ unsigned int size)
+{
+ int rc;
+ ulong linear;
+
+ rc = linearize(ctxt, addr, size, true, &linear);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}
-/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
- /* NULL selector is not valid for TR, CS and SS (except for long mode) */
- if ((seg == VCPU_SREG_CS
- || (seg == VCPU_SREG_SS
- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
- || seg == VCPU_SREG_TR)
- && null_selector)
- goto exception;
-
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
- if (null_selector) /* for NULL selector skip all following checks */
+ /* NULL selector is not valid for TR, CS and (except for long mode) SS */
+ if (null_selector) {
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+ goto exception;
+
+ if (seg == VCPU_SREG_SS) {
+ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+ goto exception;
+
+ /*
+ * ctxt->ops->set_segment expects the CPL to be in
+ * SS.DPL, so fake an expand-up 32-bit data segment.
+ */
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ seg_desc.dpl = cpl;
+ seg_desc.d = 1;
+ seg_desc.g = 1;
+ }
+
+ /* Skip all following checks */
goto load;
+ }
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
+
+ /*
+ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+ * they can load it at CPL<3 (Intel's manual says only LSS can,
+ * but it's wrong).
+ *
+ * However, the Intel manual says that putting IST=1/DPL=3 in
+ * an interrupt gate will result in SS=3 (the AMD manual instead
+ * says it doesn't), so allow SS=3 in __load_segment_descriptor
+ * and only forbid it here.
+ */
+ if (seg == VCPU_SREG_SS && selector == 3 &&
+ ctxt->mode == X86EMUL_MODE_PROT64)
+ return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
- return segmented_write(ctxt, ctxt->dst.addr.mem,
- &desc_ptr, 2 + ctxt->op_bytes);
+ return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+ &desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
else
size = offsetof(struct fxregs_state, xmm_space[0]);
- return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+ return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5fe290c1b7d8..2f6ef5121a4c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
+
+void kvm_lapic_exit(void)
+{
+ static_key_deferred_flush(&apic_hw_disabled);
+ static_key_deferred_flush(&apic_sw_disabled);
+}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index e0c80233b3e1..ff8039d61672 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 24db5fb6f575..a236decb81e4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -132,12 +132,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
-#define VMX_VPID_EXTENT_SUPPORTED_MASK \
- (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
- VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
- VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
- VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
-
/*
* Hyper-V requires all of these, so mark them as supported even though
* they are just treated the same as all-context.
@@ -10473,12 +10467,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- goto out;
+ return 1;
}
if (vmcs12->vmcs_link_pointer != -1ull) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
- goto out;
+ return 1;
}
/*
@@ -10498,7 +10492,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- goto out;
+ return 1;
}
}
@@ -10516,7 +10510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
- goto out;
+ return 1;
}
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51ccfe08e32f..d153be8929a6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3070,6 +3070,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}
+static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
@@ -3106,10 +3108,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+ u32 hflags = vcpu->arch.hflags;
if (events->smi.smm)
- vcpu->arch.hflags |= HF_SMM_MASK;
+ hflags |= HF_SMM_MASK;
else
- vcpu->arch.hflags &= ~HF_SMM_MASK;
+ hflags &= ~HF_SMM_MASK;
+ kvm_set_hflags(vcpu, hflags);
+
vcpu->arch.smi_pending = events->smi.pending;
if (events->smi.smm_inside_nmi)
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
@@ -3337,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
switch (cap->cap) {
case KVM_CAP_HYPERV_SYNIC:
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return -EINVAL;
return kvm_hv_activate_synic(vcpu);
default:
return -EINVAL;
@@ -6040,6 +6047,7 @@ out:
void kvm_arch_exit(void)
{
+ kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -6163,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3,
+ &ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 324e5713d386..af59f808742f 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void *)-1) {
+ if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e76d1af60f7a..bb660e53cbd6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1172,6 +1172,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
prog->jited = 1;
+ } else {
+ prog = orig_prog;
}
out_addrs:
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd69832d7f4..3961103e9176 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+ {
+ .callback = set_nouse_crs,
+ .ident = "Supermicro X8DTH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+ },
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 936a488d6cf6..274dfc481849 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
return 0;
}
+#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+ u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+ u64 end_hi = 0;
+ char buf[64];
+
+ if (md->num_pages == 0) {
+ end = 0;
+ } else if (md->num_pages > EFI_PAGES_MAX ||
+ EFI_PAGES_MAX - md->num_pages <
+ (md->phys_addr >> EFI_PAGE_SHIFT)) {
+ end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+ >> OVERFLOW_ADDR_SHIFT;
+
+ if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+ end_hi += 1;
+ } else {
+ return true;
+ }
+
+ pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+ if (end_hi) {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end_hi, end);
+ } else {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end);
+ }
+ return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+ efi_memory_desc_t *out = efi.memmap.map;
+ const efi_memory_desc_t *in = out;
+ const efi_memory_desc_t *end = efi.memmap.map_end;
+ int i, n_removal;
+
+ for (i = n_removal = 0; in < end; i++) {
+ if (efi_memmap_entry_valid(in, i)) {
+ if (out != in)
+ memcpy(out, in, efi.memmap.desc_size);
+ out = (void *)out + efi.memmap.desc_size;
+ } else {
+ n_removal++;
+ }
+ in = (void *)in + efi.memmap.desc_size;
+ }
+
+ if (n_removal > 0) {
+ u64 size = efi.memmap.nr_map - n_removal;
+
+ pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+ efi_memmap_install(efi.memmap.phys_map, size);
+ }
+}
+
void __init efi_print_memmap(void)
{
efi_memory_desc_t *md;
@@ -472,6 +536,8 @@ void __init efi_init(void)
}
}
+ efi_clean_memmap();
+
if (efi_enabled(EFI_DBG))
efi_print_memmap();
}
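efi_clean_memmap() drops descriptors whose page count or end address cannot be valid and compacts the survivors in place, copying each kept descriptor forward only once a gap has opened up. The same compaction idiom in generic C (is_valid() and the element layout are placeholders, not the EFI descriptor format):

#include <stddef.h>
#include <string.h>

/* Keep only the elements of 'base' that satisfy is_valid(), preserving order.
 * Returns the number of elements kept; the tail of the array is left as-is. */
static size_t compact_array(void *base, size_t nmemb, size_t size,
                            int (*is_valid)(const void *))
{
        char *out = base;
        const char *in = base;
        size_t kept = 0;

        for (size_t i = 0; i < nmemb; i++, in += size) {
                if (!is_valid(in))
                        continue;
                if (out != in)                  /* only move once a hole exists */
                        memcpy(out, in, size);
                out += size;
                kept++;
        }
        return kept;
}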
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 10aca63a50d7..30031d5293c4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
new_size = efi.memmap.desc_size * num_entries;
- new_phys = memblock_alloc(new_size, 0);
+ new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Could not allocate boot services memmap\n");
return;
@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
}
new_size = efi.memmap.desc_size * num_entries;
- new_phys = memblock_alloc(new_size, 0);
+ new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Failed to allocate new EFI memmap\n");
return;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 61b5ed2b7d40..90e4f2a6625b 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
# SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
# I2C Devices
obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
index 30c601b399ee..27186ad654c9 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -11,6 +11,7 @@
* of the License.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -34,6 +35,9 @@ static void __init *spidev_platform_data(void *info)
{
struct spi_board_info *spi_info = info;
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return ERR_PTR(-ENODEV);
+
spi_info->mode = SPI_MODE_0;
spi_info->controller_data = &spidev_spi_chip;
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index a9fafb5c8738..a0b36a9d5df1 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -48,7 +48,7 @@ int __init pci_xen_swiotlb_detect(void)
* activate this IOMMU. If running as PV privileged, activate it
* irregardless.
*/
- if ((xen_initial_domain() || swiotlb || swiotlb_force))
+ if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
xen_swiotlb = 1;
/* If we are running under Xen, we MUST disable the native SWIOTLB.
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8c394e30e5fe..f3f7b41116f7 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -713,10 +713,9 @@ static void __init xen_reserve_xen_mfnlist(void)
size = PFN_PHYS(xen_start_info->nr_p2m_frames);
}
- if (!xen_is_e820_reserved(start, size)) {
- memblock_reserve(start, size);
+ memblock_reserve(start, size);
+ if (!xen_is_e820_reserved(start, size))
return;
- }
#ifdef CONFIG_X86_32
/*
@@ -727,6 +726,7 @@ static void __init xen_reserve_xen_mfnlist(void)
BUG();
#else
xen_relocate_p2m();
+ memblock_free(start, size);
#endif
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ed89c8f4b2a0..f8c82a9b4012 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- if (discard) {
- ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
- BLKDEV_DISCARD_ZERO, biop);
- if (ret == 0 || (ret && ret != -EOPNOTSUPP))
- goto out;
- }
-
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
biop);
if (ret == 0 || (ret && ret != -EOPNOTSUPP))
@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
struct bio *bio = NULL;
struct blk_plug plug;
+ if (discard) {
+ if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+ BLKDEV_DISCARD_ZERO))
+ return 0;
+ }
+
blk_start_plug(&plug);
ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
&bio, discard);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8e67a155d04..c3400b5444a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
{
LIST_HEAD(rq_list);
- LIST_HEAD(driver_list);
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6e82769f4042..f0a9c07b4c7a 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+ __releases(lock)
+ __acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
if (may_queue(rwb, rqw, &wait, rw))
break;
- if (lock)
+ if (lock) {
spin_unlock_irq(lock);
-
- io_schedule();
-
- if (lock)
+ io_schedule();
spin_lock_irq(lock);
+ } else
+ io_schedule();
} while (1);
finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
{
unsigned int ret = 0;
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 472211fa183a..3bd15d8095b1 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -16,7 +16,7 @@
static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
{
- sector_t zone_mask = blk_queue_zone_size(q) - 1;
+ sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
return sector & ~zone_mask;
}
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
return -EINVAL;
/* Check alignment (handle eventual smaller last zone) */
- zone_sectors = blk_queue_zone_size(q);
+ zone_sectors = blk_queue_zone_sectors(q);
if (sector & (zone_sectors - 1))
return -EINVAL;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d7beb6bbbf66..7afb9907821f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
struct block_device *bdev,
sector_t from, sector_t size)
{
- unsigned int zone_size = bdev_zone_size(bdev);
+ unsigned int zone_sectors = bdev_zone_sectors(bdev);
/*
* If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
* regular block devices (no zone operation) and their zone size will
* be reported as 0. Allow this case.
*/
- if (!zone_size)
+ if (!zone_sectors)
return true;
/*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
* use it. Check the zone size too: it should be a power of 2 number
* of sectors.
*/
- if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+ if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
u32 rem;
- div_u64_rem(from, zone_size, &rem);
+ div_u64_rem(from, zone_sectors, &rem);
if (rem)
return false;
if ((from + size) < get_capacity(disk)) {
- div_u64_rem(size, zone_size, &rem);
+ div_u64_rem(size, zone_sectors, &rem);
if (rem)
return false;
}
} else {
- if (from & (zone_size - 1))
+ if (from & (zone_sectors - 1))
return false;
if ((from + size) < get_capacity(disk) &&
- (size & (zone_size - 1)))
+ (size & (zone_sectors - 1)))
return false;
}
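Beyond the rename, this function shows why the power-of-two case is treated specially: `from & (zone_sectors - 1)` only computes the remainder when zone_sectors is a power of two, which is why the other branch falls back to div_u64_rem(). A standalone illustration with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    static int is_power_of_2(uint64_t n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            uint64_t zone_sectors = 524288;         /* hypothetical zone size */
            uint64_t from = 3 * zone_sectors + 17;  /* misaligned start */

            if (is_power_of_2(zone_sectors))
                    printf("mask: %llu  rem: %llu\n",
                           (unsigned long long)(from & (zone_sectors - 1)),
                           (unsigned long long)(from % zone_sectors));
            return 0;
    }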
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f616ad74cce7..44e888b0b041 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
+ void *input_vec;
+ input_vec = kmalloc(ilen, GFP_KERNEL);
+ if (!input_vec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(input_vec, ctemplate[i].input, ilen);
memset(output, 0, dlen);
init_completion(&result.completion);
- sg_init_one(&src, ctemplate[i].input, ilen);
+ sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n",
algo);
+ kfree(input_vec);
ret = -ENOMEM;
goto out;
}
@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
+ kfree(input_vec);
acomp_request_free(req);
goto out;
}
@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen);
ret = -EINVAL;
+ kfree(input_vec);
acomp_request_free(req);
goto out;
}
@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
i + 1, algo);
hexdump(output, req->dlen);
ret = -EINVAL;
+ kfree(input_vec);
acomp_request_free(req);
goto out;
}
+ kfree(input_vec);
acomp_request_free(req);
}
for (i = 0; i < dtcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = dtemplate[i].inlen;
+ void *input_vec;
+
+ input_vec = kmalloc(ilen, GFP_KERNEL);
+ if (!input_vec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(input_vec, dtemplate[i].input, ilen);
memset(output, 0, dlen);
init_completion(&result.completion);
- sg_init_one(&src, dtemplate[i].input, ilen);
+ sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n",
algo);
+ kfree(input_vec);
ret = -ENOMEM;
goto out;
}
@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
+ kfree(input_vec);
acomp_request_free(req);
goto out;
}
@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen);
ret = -EINVAL;
+ kfree(input_vec);
acomp_request_free(req);
goto out;
}
@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
i + 1, algo);
hexdump(output, req->dlen);
ret = -EINVAL;
+ kfree(input_vec);
acomp_request_free(req);
goto out;
}
+ kfree(input_vec);
acomp_request_free(req);
}
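The testmgr change copies every static test vector into a freshly allocated buffer before building the scatterlist, presumably because the original vectors may not live in memory that is safe to use there, and each exit path then has to free that copy. A userspace sketch of the allocate/copy/free-on-every-exit shape (malloc stands in for kmalloc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for one read-only test vector. */
    static const char vector[] = "example input";

    static int run_one_test(int simulate_failure)
    {
            size_t ilen = sizeof(vector);
            void *input_vec;
            int ret = 0;

            input_vec = malloc(ilen);       /* kmalloc(ilen, GFP_KERNEL) in the kernel */
            if (!input_vec)
                    return -1;
            memcpy(input_vec, vector, ilen);

            /* ...build and run the request from input_vec... */
            if (simulate_failure) {
                    ret = -1;
                    goto out;               /* the real code frees on each branch */
            }

    out:
            free(input_vec);                /* the copy is released on every path */
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", run_one_test(0), run_one_test(1));
            return 0;
    }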
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 13caebd679f5..8c4e0a18460a 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -114,7 +114,7 @@ void __init acpi_watchdog_init(void)
pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
resources, nresources);
if (IS_ERR(pdev))
- pr_err("Failed to create platform device\n");
+ pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
kfree(resources);
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b5710979..b0399e8f6d27 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
ACPI_FUNCTION_TRACE(tb_install_and_load_table);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
/* Install the table and load it into the namespace */
status = acpi_tb_install_standard_table(address, flags, TRUE,
override, &i);
if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
+ goto exit;
}
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
status = acpi_tb_load_table(i, acpi_gbl_root_node);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-unlock_and_exit:
+exit:
*table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251a9f97..01e1b3d63fc0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
+ /* Acquire the table lock */
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
if (reload) {
/*
* Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
new_table_desc.signature.integer));
status = AE_BAD_SIGNATURE;
- goto release_and_exit;
+ goto unlock_and_exit;
}
/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Table is still loaded, this is an error */
status = AE_ALREADY_EXISTS;
- goto release_and_exit;
+ goto unlock_and_exit;
} else {
/*
* Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
* indicate the re-installation.
*/
acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
*table_index = i;
return_ACPI_STATUS(AE_OK);
}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Invoke table handler if present */
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
if (acpi_gbl_table_handler) {
(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
new_table_desc.pointer,
acpi_gbl_table_handler_context);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+ /* Release the table lock */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
release_and_exit:
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f8d65647ea79..fb19e1cdb641 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
if (check_children && list_empty(&adev->children))
return -ENODEV;
- return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ /*
+ * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+ * device ID, it is better to make it look less attractive here, so that
+ * the other device with the same _ADR value (that may not have a valid
+ * device ID) can be matched going forward. [This means a second spec
+ * violation in a row, so whatever we do here is best effort anyway.]
+ */
+ return sta_present && list_empty(&adev->pnp.ids) ?
+ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@@ -250,7 +258,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
return 0;
err:
- acpi_dma_deconfigure(dev);
ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
put_device(&acpi_dev->dev);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1b41a2739dac..0c452265c111 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -37,6 +37,7 @@ void acpi_amba_init(void);
static inline void acpi_amba_init(void) {}
#endif
int acpi_sysfs_init(void);
+void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 45dec874ea55..192691880d55 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2074,6 +2074,7 @@ int __init acpi_scan_init(void)
}
}
+ acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
acpi_ec_ecdt_start();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe227a0..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- /*
- * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
- * the default suspend mode was not selected from the command line.
- */
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
- mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_default = PM_SUSPEND_FREEZE;
-
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 703c26e7022c..cf05ae973381 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -708,6 +708,62 @@ end:
return result ? result : size;
}
+/*
+ * A Quirk Mechanism for GPE Flooding Prevention:
+ *
+ * Quirks may be needed to prevent GPE flooding on a specific GPE. The
+ * flooding typically cannot be detected and prevented automatically by the
+ * ACPI_GPE_DISPATCH_NONE check, because a _Lxx/_Exx handler is present in
+ * the AML tables. This normally indicates a feature gap in Linux, so
+ * instead of maintaining endless quirk tables we provide a boot parameter
+ * for those who need the quirk. For example, to prevent GPE flooding on
+ * GPE 0x00, specify the following
+ * boot parameter:
+ * acpi_mask_gpe=0x00
+ * The masking status can then be changed at runtime via the following
+ * controlling interface:
+ * echo unmask > /sys/firmware/acpi/interrupts/gpe00
+ */
+
+/*
+ * Currently, the GPE flooding prevention only supports masking GPEs
+ * numbered from 0x00 to 0x7f.
+ */
+#define ACPI_MASKABLE_GPE_MAX 0x80
+
+static u64 __initdata acpi_masked_gpes;
+
+static int __init acpi_gpe_set_masked_gpes(char *val)
+{
+ u8 gpe;
+
+ if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
+ return -EINVAL;
+ acpi_masked_gpes |= ((u64)1<<gpe);
+
+ return 1;
+}
+__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
+
+void __init acpi_gpe_apply_masked_gpes(void)
+{
+ acpi_handle handle;
+ acpi_status status;
+ u8 gpe;
+
+ for (gpe = 0;
+ gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
+ gpe++) {
+ if (acpi_masked_gpes & ((u64)1<<gpe)) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
+ }
+ }
+ }
+}
+
void acpi_irq_stats_init(void)
{
acpi_status status;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..7f48156cbc0c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
},
},
- {
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
- /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
- .callback = video_detect_force_native,
- .ident = "HP Pavilion dv6",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
- },
- },
-
{ },
};
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 4ef4c5caed4f..8a8e403644d6 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -132,9 +132,9 @@ config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
depends on FB && OF && I2C && INPUT
select FB_SYS_FOPS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
help
diff --git a/drivers/base/base.h b/drivers/base/base.h
index ada9dce34e6d..e19b1008e5fb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
-extern struct device_attribute dev_attr_deferred_probe;
-
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 020ea7f05520..8c25e68e67d7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
- error = device_create_file(dev, &dev_attr_deferred_probe);
- if (error)
- goto err_remove_online;
-
return 0;
- err_remove_online:
- device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
- device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a8b258e5407b..a1fbf55c4d3a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
-static ssize_t deferred_probe_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- bool value;
-
- mutex_lock(&deferred_probe_mutex);
- value = !list_empty(&dev->p->deferred_probe);
- mutex_unlock(&deferred_probe_mutex);
-
- return sprintf(buf, "%d\n", value);
-}
-DEVICE_ATTR_RO(deferred_probe);
-
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
* to prohibit probing of devices as it could be unsafe.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e6..dacb6a8418aa 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
sprintf(buf, "%s", zone->name);
/* MMOP_ONLINE_KERNEL */
- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+ zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
}
/* MMOP_ONLINE_MOVABLE */
- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+ zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index a5e1262b964b..2997026b4dfb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -626,6 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
out:
/* Measure resume latency. */
+ time_start = 0;
if (timed && runtime_pm)
time_start = ktime_get();
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 38c576f76d36..9fd06eeb1a17 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
- int result, flags;
+ int result;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (type != NBD_CMD_WRITE)
return 0;
- flags = 0;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
bio_for_each_segment(bvec, bio, iter) {
bool is_last = !next && bio_iter_last(bvec, iter);
+ int flags = is_last ? 0 : MSG_MORE;
- if (is_last)
- flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
result = sock_send_bvec(nbd, index, &bvec, flags);
@@ -1042,6 +1040,7 @@ static int __init nbd_init(void)
return -ENOMEM;
for (i = 0; i < nbds_max; i++) {
+ struct request_queue *q;
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
goto out;
@@ -1067,12 +1066,13 @@ static int __init nbd_init(void)
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
- if (!disk->queue) {
+ q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+ if (IS_ERR(q)) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
+ disk->queue = q;
/*
* Tell the block layer that we are not a rotational device
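This fix (and the virtio_blk one below) hinges on blk_mq_init_queue() reporting failure with an ERR_PTR()-encoded pointer rather than NULL, so the old `!disk->queue` test could never trigger. A tiny userspace model of the error-pointer convention (simplified; the real helpers live in include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for blk_mq_init_queue(): fail with -ENOMEM. */
    static void *init_queue(void)
    {
            return ERR_PTR(-ENOMEM);
    }

    int main(void)
    {
            void *q = init_queue();

            if (!q)
                    puts("never reached: error pointers are not NULL");
            if (IS_ERR(q))
                    printf("init failed: %ld\n", PTR_ERR(q));
            return 0;
    }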
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5545a679abd8..10332c24f961 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -56,6 +56,7 @@ struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
u8 status;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
struct scatterlist sg[];
};
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
}
if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_put_disk;
- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+ q = blk_mq_init_queue(&vblk->tag_set);
if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
+ vblk->disk->queue = q;
q->queuedata = vblk;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa81f929..265f1a7072e9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
/* Number of pages per ring buffer. */
unsigned int nr_ring_pages;
struct request_queue *rq;
- unsigned int feature_flush;
- unsigned int feature_fua;
+ unsigned int feature_flush:1;
+ unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ unsigned int feature_persistent:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int feature_persistent:1;
/* Number of 4KB segments handled */
unsigned int max_indirect_segments;
int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
}
else
grants = info->max_indirect_segments;
- psegs = grants / GRANTS_PER_PSEG;
+ psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
err = fill_grant_buffer(rinfo,
(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
blkfront_setup_discard(info);
info->feature_persistent =
- xenbus_read_unsigned(info->xbdev->otherend,
- "feature-persistent", 0);
+ !!xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ if (indirect_segments > xen_blkif_max_segments)
+ indirect_segments = xen_blkif_max_segments;
+ if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ indirect_segments = 0;
+ info->max_indirect_segments = indirect_segments;
}
/*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
+ if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
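The DIV_ROUND_UP() change matters whenever the grant count is not an exact multiple of GRANTS_PER_PSEG, since plain integer division silently drops the remainder. A quick check with hypothetical numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int grants = 33;               /* hypothetical values */
            unsigned int grants_per_pseg = 16;

            printf("truncating: %u\n", grants / grants_per_pseg);              /* 2 */
            printf("round up:   %u\n", DIV_ROUND_UP(grants, grants_per_pseg)); /* 3 */
            return 0;
    }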
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 15f58ab44d0b..e5ab7d9e8c45 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -25,6 +25,7 @@
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
return bvec->bv_len != PAGE_SIZE;
}
+static void zram_revalidate_disk(struct zram *zram)
+{
+ revalidate_disk(zram->disk);
+ /* revalidate_disk resets BDI_CAP_STABLE_WRITES, so set it again */
+ zram->disk->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+}
+
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_revalidate_disk(zram);
up_write(&zram->init_lock);
- /*
- * Revalidate disk out of the init_lock to avoid lockdep splat.
- * It's okay because disk's capacity is protected by init_lock
- * so that revalidate_disk always sees up-to-date capacity.
- */
- revalidate_disk(zram->disk);
-
return len;
out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- revalidate_disk(zram->disk);
+ zram_revalidate_disk(zram);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5bb1985ec484..6d9cc2d39d22 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
- if (!pfn_valid(PFN_DOWN(p)))
- return -EIO;
-
read = 0;
if (p < (unsigned long) high_memory) {
low_count = count;
@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
* by the kernel or data corruption may occur
*/
kbuf = xlate_dev_kmem_ptr((void *)p);
+ if (!virt_addr_valid(kbuf))
+ return -ENXIO;
if (copy_to_user(buf, kbuf, sz))
return -EFAULT;
@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
* corruption may occur.
*/
ptr = xlate_dev_kmem_ptr((void *)p);
+ if (!virt_addr_valid(ptr))
+ return -ENXIO;
copied = copy_from_user(ptr, buf, sz);
if (copied) {
@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
- if (!pfn_valid(PFN_DOWN(p)))
- return -EIO;
-
if (p < (unsigned long) high_memory) {
unsigned long to_write = min_t(unsigned long, count,
(unsigned long)high_memory - p);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 02819e0703c8..87885d146dbb 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
struct pardevice *pdev = NULL;
char *name;
struct pardev_cb ppdev_cb;
+ int rc = 0;
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
port = parport_find_number(minor);
if (!port) {
pr_warn("%s: no associated port!\n", name);
- kfree(name);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err;
}
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
- kfree(name);
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err;
}
pp->pdev = pdev;
dev_dbg(&pdev->dev, "registered pardevice\n");
- return 0;
+err:
+ kfree(name);
+ return rc;
}
static enum ieee1284_phase init_phase(int mode)
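The ppdev rework routes every exit through one label so the kasprintf()'d name is freed exactly once and only after its last use in the warning messages (the removed code freed it before the final pr_warn() that still printed it). A userspace sketch of that single-exit cleanup idiom (strdup stands in for kasprintf):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int register_thing(const char *id, int port_missing)
    {
            char *name;
            int rc = 0;

            name = strdup(id);              /* kasprintf() in the driver */
            if (!name)
                    return -1;

            if (port_missing) {
                    fprintf(stderr, "%s: no associated port!\n", name);
                    rc = -1;
                    goto err;               /* name is still valid for the message above */
            }

            /* ...register the device using name... */

    err:
            free(name);                     /* one place frees it, after the last use */
            return rc;
    }

    int main(void)
    {
            register_thing("pp0", 0);
            register_thing("pp1", 1);
            return 0;
    }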
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79c2683..17857beb4892 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 5eb05dbf59b8..fc585f370549 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -768,5 +768,5 @@ fail:
kfree(clks);
iounmap(base);
}
-CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
-CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 9375777776d9..b533f99550e1 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -37,12 +37,14 @@
* @smstpcr: module stop control register
* @mstpsr: module stop status register (optional)
* @lock: protects writes to SMSTPCR
+ * @width_8bit: registers are 8-bit, not 32-bit
*/
struct mstp_clock_group {
struct clk_onecell_data data;
void __iomem *smstpcr;
void __iomem *mstpsr;
spinlock_t lock;
+ bool width_8bit;
};
/**
@@ -59,6 +61,18 @@ struct mstp_clock {
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
+ u32 __iomem *reg)
+{
+ return group->width_8bit ? readb(reg) : clk_readl(reg);
+}
+
+static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
+ u32 __iomem *reg)
+{
+ group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
+}
+
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
struct mstp_clock *clock = to_mstp_clock(hw);
@@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
spin_lock_irqsave(&group->lock, flags);
- value = clk_readl(group->smstpcr);
+ value = cpg_mstp_read(group, group->smstpcr);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- clk_writel(value, group->smstpcr);
+ cpg_mstp_write(group, value, group->smstpcr);
spin_unlock_irqrestore(&group->lock, flags);
@@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
- if (!(clk_readl(group->mstpsr) & bitmask))
+ if (!(cpg_mstp_read(group, group->mstpsr) & bitmask))
break;
cpu_relax();
}
@@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
u32 value;
if (group->mstpsr)
- value = clk_readl(group->mstpsr);
+ value = cpg_mstp_read(group, group->mstpsr);
else
- value = clk_readl(group->smstpcr);
+ value = cpg_mstp_read(group, group->smstpcr);
return !(value & BIT(clock->bit_index));
}
@@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
return;
}
+ if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks"))
+ group->width_8bit = true;
+
for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
clks[i] = ERR_PTR(-ENOENT);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495cbf0d..cdc092a1d9ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
- GATE_BUS_TOP, 27, 0, 0),
+ GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
};
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
- GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
- GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
GATE_BUS_TOP, 5, 0, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
- GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
- GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
GATE_BUS_TOP, 16, 0, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
GATE_BUS_TOP, 17, 0, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
- GATE_BUS_TOP, 18, 0, 0),
+ GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
GATE_BUS_TOP, 28, 0, 0),
GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
GATE_BUS_TOP, 29, 0, 0),
GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
- SRC_MASK_TOP2, 24, 0, 0),
+ SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, 0, 0),
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 4da1dc2278bd..670ff0f25b67 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bc97b6a4b1cf..7fcaf26e8f81 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -26,6 +26,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
+ { .compatible = "apm,xgene-shadowcat", },
+
{ .compatible = "arm,integrator-ap", },
{ .compatible = "arm,integrator-cp", },
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6acbd4af632e..a54d65aa776d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -857,13 +857,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
-static void intel_pstate_hwp_set(const struct cpumask *cpumask)
+static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
struct perf_limits *perf_limits = limits;
u64 value, cap;
- for_each_cpu(cpu, cpumask) {
+ for_each_cpu(cpu, policy->cpus) {
int max_perf_pct, min_perf_pct;
struct cpudata *cpu_data = all_cpu_data[cpu];
s16 epp;
@@ -949,7 +949,7 @@ skip_epp:
static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
if (hwp_active)
- intel_pstate_hwp_set(policy->cpus);
+ intel_pstate_hwp_set(policy);
return 0;
}
@@ -968,19 +968,28 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
+ int ret;
+
if (!hwp_active)
return 0;
+ mutex_lock(&intel_pstate_limits_lock);
+
all_cpu_data[policy->cpu]->epp_policy = 0;
- return intel_pstate_hwp_set_policy(policy);
+ ret = intel_pstate_hwp_set_policy(policy);
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret;
}
-static void intel_pstate_hwp_set_online_cpus(void)
+static void intel_pstate_update_policies(void)
{
- get_online_cpus();
- intel_pstate_hwp_set(cpu_online_mask);
- put_online_cpus();
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpufreq_update_policy(cpu);
}
/************************** debugfs begin ************************/
@@ -1018,10 +1027,6 @@ static void __init intel_pstate_debug_expose_params(void)
struct dentry *debugfs_parent;
int i = 0;
- if (hwp_active ||
- pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
- return;
-
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
@@ -1105,11 +1110,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
- if (hwp_active)
- intel_pstate_hwp_set_online_cpus();
-
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
return count;
}
@@ -1134,11 +1138,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
- if (hwp_active)
- intel_pstate_hwp_set_online_cpus();
-
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
return count;
}
@@ -1163,11 +1166,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
- if (hwp_active)
- intel_pstate_hwp_set_online_cpus();
-
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
return count;
}
@@ -2003,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits = &performance_limits;
perf_limits = limits;
}
- if (policy->max >= policy->cpuinfo.max_freq) {
+ if (policy->max >= policy->cpuinfo.max_freq &&
+ !limits->no_turbo) {
pr_debug("set performance\n");
intel_pstate_set_performance_limits(perf_limits);
goto out;
@@ -2045,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;
+ /* When per-CPU limits are used, sysfs limits are not used */
+ if (!per_cpu_limits) {
+ unsigned int max_freq, min_freq;
+
+ max_freq = policy->cpuinfo.max_freq *
+ limits->max_sysfs_pct / 100;
+ min_freq = policy->cpuinfo.max_freq *
+ limits->min_sysfs_pct / 100;
+ cpufreq_verify_within_limits(policy, min_freq, max_freq);
+ }
+
return 0;
}
@@ -2153,8 +2167,12 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
if (per_cpu_limits)
perf_limits = cpu->perf_limits;
+ mutex_lock(&intel_pstate_limits_lock);
+
intel_pstate_update_perf_limits(policy, perf_limits);
+ mutex_unlock(&intel_pstate_limits_lock);
+
return 0;
}
@@ -2487,7 +2505,10 @@ hwp_cpu_matched:
if (rc)
goto out;
- intel_pstate_debug_expose_params();
+ if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+ pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+ intel_pstate_debug_expose_params();
+
intel_pstate_sysfs_expose_params();
if (hwp_active)
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index a768da7138a1..b7872f62f674 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_SRC_IN_SRAM BIT(30)
#define CESA_TDMA_END_OF_REQ BIT(29)
#define CESA_TDMA_BREAK_CHAIN BIT(28)
-#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE BIT(27)
+#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 317cf029c0cf..77c0fb936f47 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
sreq->offset = 0;
}
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_req *base = &creq->base;
+
+ /* We must explicitly set the digest state. */
+ if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+ struct mv_cesa_engine *engine = base->engine;
+ int i;
+
+ /* Set the hash state in the IVDIG regs. */
+ for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+ writel_relaxed(creq->state[i], engine->regs +
+ CESA_IVDIG(i));
+ }
+
+ mv_cesa_dma_step(base);
+}
+
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->base);
+ mv_cesa_ahash_dma_step(ahashreq);
else
mv_cesa_ahash_std_step(ahashreq);
}
@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
+ bool set_state = false;
int ret;
u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
+ if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+ set_state = true;
+
if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (type != CESA_TDMA_RESULT)
basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
+ if (set_state) {
+ /*
+ * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+ * let the step logic know that the IVDIG registers should be
+ * explicitly set before launching a TDMA chain.
+ */
+ basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+ }
+
return 0;
err_free_tdma:
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 4416b88eca70..c76375ff376d 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
last->next = dreq->chain.first;
engine->chain.last = dreq->chain.last;
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+ /*
+ * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain, or if the request
+ * being queued needs the IV regs to be set before launching
+ * the request.
+ */
+ if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+ !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
last->next_dma = dreq->chain.first->cur_dma;
}
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a324801d6a66..47206a21bb90 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
list_add(&devfreq->node, &devfreq_list);
governor = find_devfreq_governor(devfreq->governor_name);
- if (!IS_ERR(governor))
- devfreq->governor = governor;
- if (devfreq->governor)
- err = devfreq->governor->event_handler(devfreq,
- DEVFREQ_GOV_START, NULL);
+ if (IS_ERR(governor)) {
+ dev_err(dev, "%s: Unable to find governor for the device\n",
+ __func__);
+ err = PTR_ERR(governor);
+ goto err_init;
+ }
+
+ devfreq->governor = governor;
+ err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
+ NULL);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index a8ed7792ece2..9af86f46fbec 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -497,7 +497,7 @@ passive:
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(bus->devfreq);
goto err;
}
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e00c9b022964..5a37b9fcf40d 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
- platfroms that enumerate it as a PCI device. For example,
+ platforms that enumerate it as a PCI device. For example,
Intel Medfield has integrated this GPDMA controller.
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 8e67895bcca3..abcc51b343ce 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 90eddd9f07e4..cc5259b881d4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
/* I/OAT v3.3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
}
}
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+ return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
static bool is_xeon_cb32(struct pci_dev *pdev)
{
return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
- is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}
bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
- GFP_KERNEL, &ioat_chan->completion_dma);
+ GFP_NOWAIT, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
order = IOAT_MAX_ORDER;
- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+ ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
if (!ring)
return -ENOMEM;
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
if (device->version >= IOAT_VER_3_0) {
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
err = ioat3_dma_probe(device, ioat_dca_enabled);
if (device->version >= IOAT_VER_3_3)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ac68666cd3f4..daf479cce691 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_DST_AMODE_POSTINC;
if (port_window) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
- d->ei = 1;
- /*
- * One frame covers the port_window and by configure
- * the source frame index to be -1 * (port_window - 1)
- * we instruct the sDMA that after a frame is processed
- * it should move back to the start of the window.
- */
- d->fi = -(port_window_bytes - 1);
if (port_window_bytes >= 64)
- d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ d->csdp |= CSDP_SRC_BURST_64;
else if (port_window_bytes >= 32)
- d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+ d->csdp |= CSDP_SRC_BURST_32;
else if (port_window_bytes >= 16)
- d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+ d->csdp |= CSDP_SRC_BURST_16;
+
} else {
d->ccr |= CCR_SRC_AMODE_CONSTANT;
}
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_SRC_AMODE_POSTINC;
if (port_window) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
+ d->ei = 1;
+ /*
+ * One frame covers the port_window and by configuring
+ * the source frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ d->fi = -(port_window_bytes - 1);
if (port_window_bytes >= 64)
- d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ d->csdp |= CSDP_DST_BURST_64;
else if (port_window_bytes >= 32)
- d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+ d->csdp |= CSDP_DST_BURST_32;
else if (port_window_bytes >= 16)
- d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+ d->csdp |= CSDP_DST_BURST_16;
} else {
d->ccr |= CCR_DST_AMODE_CONSTANT;
}
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
osg->addr = sg_dma_address(sgent);
osg->en = en;
osg->fn = sg_dma_len(sgent) / frame_bytes;
- if (port_window && dir == DMA_MEM_TO_DEV) {
+ if (port_window && dir == DMA_DEV_TO_MEM) {
osg->ei = 1;
/*
* One frame covers the port_window and by configure
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
struct omap_dmadev *od;
struct resource *res;
int rc, i, irq;
+ u32 lch_count;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- if (!pdev->dev.of_node) {
- od->dma_requests = od->plat->dma_attr->lch_count;
- if (unlikely(!od->dma_requests))
- od->dma_requests = OMAP_SDMA_REQUESTS;
- } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
- &od->dma_requests)) {
+ /* Number of DMA requests */
+ od->dma_requests = OMAP_SDMA_REQUESTS;
+ if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+ "dma-requests",
+ &od->dma_requests)) {
dev_info(&pdev->dev,
"Missing dma-requests property, using %u.\n",
OMAP_SDMA_REQUESTS);
- od->dma_requests = OMAP_SDMA_REQUESTS;
}
- od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
- sizeof(*od->lch_map), GFP_KERNEL);
+ /* Number of available logical channels */
+ if (!pdev->dev.of_node) {
+ lch_count = od->plat->dma_attr->lch_count;
+ if (unlikely(!lch_count))
+ lch_count = OMAP_SDMA_CHANNELS;
+ } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+ &lch_count)) {
+ dev_info(&pdev->dev,
+ "Missing dma-channels property, using %u.\n",
+ OMAP_SDMA_CHANNELS);
+ lch_count = OMAP_SDMA_CHANNELS;
+ }
+
+ od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+ GFP_KERNEL);
if (!od->lch_map)
return -ENOMEM;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 87fd01539fcb..740bbb942594 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
/* for cyclic capability */
bool cyclic;
+
+ /* for runtime pm tracking */
+ bool active;
};
struct pl330_dmac {
@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
_stop(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = true;
+ pch->active = false;
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
if (power_down) {
+ pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
+ bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
+ power_down = pch->active;
+ pch->active = false;
/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
+ if (power_down)
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
return 0;
@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
* updated on work_list emptiness status.
*/
WARN_ON(list_empty(&pch->submitted_list));
+ pch->active = true;
pm_runtime_get_sync(pch->dmac->ddma.dev);
}
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2e441d0ccd79..4c357d475465 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+ struct rcar_dmac_chan_map *map = &rchan->map;
struct rcar_dmac_desc_page *page, *_page;
struct rcar_dmac_desc *desc;
LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
free_page((unsigned long)page);
}
+ /* Remove slave mapping if present. */
+ if (map->slave.xfer_size) {
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+ }
+
pm_runtime_put(chan->device->dev);
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 3688d0873a3e..3056ce7f8c69 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
- u32 residue;
+ u32 residue = 0;
status = dma_cookie_status(c, cookie, state);
if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
spin_lock_irqsave(&chan->vchan.lock, flags);
vdesc = vchan_find_desc(&chan->vchan, cookie);
- if (cookie == chan->desc->vdesc.tx.cookie) {
+ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
residue = stm32_dma_desc_residue(chan, chan->desc,
chan->next_sg);
- } else if (vdesc) {
+ else if (vdesc)
residue = stm32_dma_desc_residue(chan,
to_stm32_dma_desc(vdesc), 0);
- } else {
- residue = 0;
- }
-
dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
struct stm32_dma_chan *chan;
struct dma_chan *c;
- if (dma_spec->args_count < 3)
+ if (dma_spec->args_count < 4)
return NULL;
cfg.channel_id = dma_spec->args[0];
cfg.request_line = dma_spec->args[1];
cfg.stream_config = dma_spec->args[2];
- cfg.threshold = 0;
+ cfg.threshold = dma_spec->args[3];
if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
STM32_DMA_MAX_REQUEST_ID))
return NULL;
- if (dma_spec->args_count > 3)
- cfg.threshold = dma_spec->args[3];
-
chan = &dmadev->chan[cfg.channel_id];
c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 3f24aeb48c0e..2403475a37cf 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_am335x_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 78298460d168..7c1e3a7b14e0 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
dev_err(&edev->dev, "out of memory in extcon_set_state\n");
kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
- return 0;
+ return -ENOMEM;
}
length = name_show(&edev->dev, NULL, prop_buf);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 70e13230d8db..9ad0b1934be9 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -721,11 +721,17 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
- if (!ret)
+ if (ret)
+ return ret;
+
+ if (scpi_info->is_legacy)
+ /* only 32-bits supported, hi_val can be junk */
+ *val = le32_to_cpu(buf.lo_val);
+ else
*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
le32_to_cpu(buf.lo_val);
- return ret;
+ return 0;
}
static int scpi_device_get_power_state(u16 dev_id)
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 520a40e5e0e4..6c7d60c239b5 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
}
/* allocate memory for new EFI memmap */
- new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
- PAGE_SIZE);
+ new_memmap_phy = efi_memmap_alloc(new_nr_map);
if (!new_memmap_phy)
return;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b98824e3800a..0e2a96b12cb3 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size,
- efi_memory_desc_t *memory_map,
- unsigned long map_size, unsigned long desc_size,
- u32 desc_ver);
-
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
void *handle,
unsigned long *new_fdt_addr,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index a6a93116a8f0..921dfa047202 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,13 +16,10 @@
#include "efistub.h"
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size,
- efi_memory_desc_t *memory_map,
- unsigned long map_size, unsigned long desc_size,
- u32 desc_ver)
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+ unsigned long orig_fdt_size,
+ void *fdt, int new_fdt_size, char *cmdline_ptr,
+ u64 initrd_addr, u64 initrd_size)
{
int node, num_rsv;
int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
+ fdt_val64 = U64_MAX; /* placeholder */
status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
&fdt_val64, sizeof(fdt_val64));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(map_size);
+ fdt_val32 = U32_MAX; /* placeholder */
status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
&fdt_val32, sizeof(fdt_val32));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(desc_size);
status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
&fdt_val32, sizeof(fdt_val32));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(desc_ver);
status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
&fdt_val32, sizeof(fdt_val32));
if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
return EFI_LOAD_ERROR;
}
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+ int node = fdt_path_offset(fdt, "/chosen");
+ u64 fdt_val64;
+ u32 fdt_val32;
+ int err;
+
+ if (node < 0)
+ return EFI_LOAD_ERROR;
+
+ fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+ &fdt_val64, sizeof(fdt_val64));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->map_size);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_size);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ return EFI_SUCCESS;
+}
+
#ifndef EFI_FDT_ALIGN
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for
- * exit_boot_services().
- */
- status = efi_get_memory_map(sys_table, &map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt(sys_table,
(void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, new_fdt_size,
- cmdline_ptr, initrd_addr, initrd_size,
- memory_map, map_size, desc_size, desc_ver);
+ cmdline_ptr, initrd_addr, initrd_size);
/* Succeeding the first time is the expected case. */
if (status == EFI_SUCCESS)
@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
/*
* We need to allocate more space for the new
* device tree, so free existing buffer that is
- * too small. Also free memory map, as we will need
- * to get new one that reflects the free/alloc we do
- * on the device tree buffer.
+ * too small.
*/
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
- sys_table->boottime->free_pool(memory_map);
new_fdt_size += EFI_PAGE_SIZE;
} else {
pr_efi_err(sys_table, "Unable to construct new device tree.\n");
- goto fail_free_mmap;
+ goto fail_free_new_fdt;
}
}
- sys_table->boottime->free_pool(memory_map);
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
+ status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+ if (status != EFI_SUCCESS) {
+ /*
+ * The kernel won't get far without the memory map, but
+ * may still be able to print something meaningful so
+ * return success here.
+ */
+ return EFI_SUCCESS;
+ }
+
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Exit boot services failed.\n");
-fail_free_mmap:
- sys_table->boottime->free_pool(memory_map);
-
fail_free_new_fdt:
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index f03ddecd232b..78686443cb37 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -9,6 +9,44 @@
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+ return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (!p)
+ return 0;
+
+ return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+ unsigned long size = num_entries * efi.memmap.desc_size;
+
+ if (slab_is_available())
+ return __efi_memmap_alloc_late(size);
+
+ return __efi_memmap_alloc_early(size);
+}
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 44bdb78f837b..29d58feaf675 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -270,8 +270,7 @@ static int suspend_test_thread(void *arg)
struct cpuidle_device *dev;
struct cpuidle_driver *drv;
/* No need for an actual callback, we just want to wake up the CPU. */
- struct timer_list wakeup_timer =
- TIMER_INITIALIZER(dummy_callback, 0, 0);
+ struct timer_list wakeup_timer;
/* Wait for the main thread to give the start signal. */
wait_for_completion(&suspend_threads_started);
@@ -287,6 +286,7 @@ static int suspend_test_thread(void *arg)
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
+ setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 03a5925a423c..6e4b278a82f1 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -15,58 +15,76 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>
+struct aspeed_bank_props {
+ unsigned int bank;
+ u32 input;
+ u32 output;
+};
+
+struct aspeed_gpio_config {
+ unsigned int nr_gpios;
+ const struct aspeed_bank_props *props;
+};
+
struct aspeed_gpio {
struct gpio_chip chip;
spinlock_t lock;
void __iomem *base;
int irq;
+ const struct aspeed_gpio_config *config;
};
struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
- const char names[4];
+ const char names[4][3];
};
static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
{
.val_regs = 0x0000,
.irq_regs = 0x0008,
- .names = { 'A', 'B', 'C', 'D' },
+ .names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
- .names = { 'E', 'F', 'G', 'H' },
+ .names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
- .names = { 'I', 'J', 'K', 'L' },
+ .names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
- .names = { 'M', 'N', 'O', 'P' },
+ .names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
- .names = { 'Q', 'R', 'S', 'T' },
+ .names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
- .names = { 'U', 'V', 'W', 'X' },
+ .names = { "U", "V", "W", "X" },
+ },
+ {
+ .val_regs = 0x01E0,
+ .irq_regs = 0x0178,
+ .names = { "Y", "Z", "AA", "AB" },
+ },
+ {
+ .val_regs = 0x01E8,
+ .irq_regs = 0x01A8,
+ .names = { "AC", "", "", "" },
},
- /*
- * A bank exists for { 'Y', 'Z', "AA", "AB" }, but is not implemented.
- * Only half of GPIOs Y support interrupt configuration, and none of Z,
- * AA or AB do as they are output only.
- */
};
#define GPIO_BANK(x) ((x) >> 5)
@@ -90,6 +108,51 @@ static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
return &aspeed_gpio_banks[bank];
}
+static inline bool is_bank_props_sentinel(const struct aspeed_bank_props *props)
+{
+ return !(props->input || props->output);
+}
+
+static inline const struct aspeed_bank_props *find_bank_props(
+ struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = gpio->config->props;
+
+ while (!is_bank_props_sentinel(props)) {
+ if (props->bank == GPIO_BANK(offset))
+ return props;
+ props++;
+ }
+
+ return NULL;
+}
+
+static inline bool have_gpio(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ unsigned int group = GPIO_OFFSET(offset) / 8;
+
+ return bank->names[group][0] != '\0' &&
+ (!props || ((props->input | props->output) & GPIO_BIT(offset)));
+}
+
+static inline bool have_input(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
+
+ return !props || (props->input & GPIO_BIT(offset));
+}
+
+#define have_irq(g, o) have_input((g), (o))
+
+static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
+
+ return !props || (props->output & GPIO_BIT(offset));
+}
+
static void __iomem *bank_val_reg(struct aspeed_gpio *gpio,
const struct aspeed_gpio_bank *bank,
unsigned int reg)
@@ -152,6 +215,9 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
unsigned long flags;
u32 reg;
+ if (!have_input(gpio, offset))
+ return -ENOTSUPP;
+
spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
@@ -170,6 +236,9 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
unsigned long flags;
u32 reg;
+ if (!have_output(gpio, offset))
+ return -ENOTSUPP;
+
spin_lock_irqsave(&gpio->lock, flags);
reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR));
@@ -189,6 +258,12 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
unsigned long flags;
u32 val;
+ if (!have_input(gpio, offset))
+ return GPIOF_DIR_OUT;
+
+ if (!have_output(gpio, offset))
+ return GPIOF_DIR_IN;
+
spin_lock_irqsave(&gpio->lock, flags);
val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset);
@@ -205,10 +280,17 @@ static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
u32 *bit)
{
int offset;
+ struct aspeed_gpio *internal;
offset = irqd_to_hwirq(d);
- *gpio = irq_data_get_irq_chip_data(d);
+ internal = irq_data_get_irq_chip_data(d);
+
+ /* This might be a bit of a questionable place to check */
+ if (!have_irq(internal, offset))
+ return -ENOTSUPP;
+
+ *gpio = internal;
*bank = to_bank(offset);
*bit = GPIO_BIT(offset);
@@ -364,6 +446,28 @@ static struct irq_chip aspeed_gpio_irqchip = {
.irq_set_type = aspeed_gpio_set_type,
};
+static void set_irq_valid_mask(struct aspeed_gpio *gpio)
+{
+ const struct aspeed_bank_props *props = gpio->config->props;
+
+ while (!is_bank_props_sentinel(props)) {
+ unsigned int offset;
+ const unsigned long int input = props->input;
+
+ /* Pretty crummy approach, but similar to GPIO core */
+ for_each_clear_bit(offset, &input, 32) {
+ unsigned int i = props->bank * 32 + offset;
+
+ if (i >= gpio->config->nr_gpios)
+ break;
+
+ clear_bit(i, gpio->chip.irq_valid_mask);
+ }
+
+ props++;
+ }
+}
+
static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
struct platform_device *pdev)
{
@@ -375,6 +479,8 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
gpio->irq = rc;
+ set_irq_valid_mask(gpio);
+
rc = gpiochip_irqchip_add(&gpio->chip, &aspeed_gpio_irqchip,
0, handle_bad_irq, IRQ_TYPE_NONE);
if (rc) {
@@ -390,6 +496,9 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
+ if (!have_gpio(gpiochip_get_data(chip), offset))
+ return -ENODEV;
+
return pinctrl_request_gpio(chip->base + offset);
}
@@ -398,8 +507,46 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
pinctrl_free_gpio(chip->base + offset);
}
+/*
+ * Any banks not specified in a struct aspeed_bank_props array are assumed to
+ * have the properties:
+ *
+ * { .input = 0xffffffff, .output = 0xffffffff }
+ */
+
+static const struct aspeed_bank_props ast2400_bank_props[] = {
+ /* input output */
+ { 5, 0xffffffff, 0x0000ffff }, /* U/V/W/X */
+ { 6, 0x0000000f, 0x0fffff0f }, /* Y/Z/AA/AB, two 4-GPIO holes */
+ { },
+};
+
+static const struct aspeed_gpio_config ast2400_config =
+ /* 220 for simplicity, really 216 with two 4-GPIO holes, four at end */
+ { .nr_gpios = 220, .props = ast2400_bank_props, };
+
+static const struct aspeed_bank_props ast2500_bank_props[] = {
+ /* input output */
+ { 5, 0xffffffff, 0x0000ffff }, /* U/V/W/X */
+ { 6, 0x0fffffff, 0x0fffffff }, /* Y/Z/AA/AB, 4-GPIO hole */
+ { 7, 0x000000ff, 0x000000ff }, /* AC */
+ { },
+};
+
+static const struct aspeed_gpio_config ast2500_config =
+ /* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */
+ { .nr_gpios = 232, .props = ast2500_bank_props, };
+
+static const struct of_device_id aspeed_gpio_of_table[] = {
+ { .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, },
+ { .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
+
static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
struct resource *res;
int rc;
@@ -415,8 +562,13 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
spin_lock_init(&gpio->lock);
- gpio->chip.ngpio = ARRAY_SIZE(aspeed_gpio_banks) * 32;
+ gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node);
+ if (!gpio_id)
+ return -EINVAL;
+
+ gpio->config = gpio_id->data;
+ gpio->chip.ngpio = gpio->config->nr_gpios;
gpio->chip.parent = &pdev->dev;
gpio->chip.direction_input = aspeed_gpio_dir_in;
gpio->chip.direction_output = aspeed_gpio_dir_out;
@@ -427,6 +579,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.set = aspeed_gpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
+ gpio->chip.irq_need_valid_mask = true;
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (rc < 0)
@@ -435,13 +588,6 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
return aspeed_gpio_setup_irqs(gpio, pdev);
}
-static const struct of_device_id aspeed_gpio_of_table[] = {
- { .compatible = "aspeed,ast2400-gpio" },
- { .compatible = "aspeed,ast2500-gpio" },
- {}
-};
-MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
-
static struct platform_driver aspeed_gpio_driver = {
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 3d1cf018e8e7..41d0ac142580 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -308,6 +308,18 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
return 0;
}
+static int bcm_kona_gpio_set_config(struct gpio_chip *chip, unsigned gpio,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return bcm_kona_gpio_set_debounce(chip, gpio, debounce);
+}
+
static const struct gpio_chip template_chip = {
.label = "bcm-kona-gpio",
.owner = THIS_MODULE,
@@ -318,7 +330,7 @@ static const struct gpio_chip template_chip = {
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
.set = bcm_kona_gpio_set,
- .set_debounce = bcm_kona_gpio_set_debounce,
+ .set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
};
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 5d38b08d1ee2..aecb847166f5 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -272,12 +272,16 @@ static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
}
-static int dln2_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
+static int dln2_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
- __le32 duration = cpu_to_le32(debounce);
+ __le32 duration;
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ duration = cpu_to_le32(pinconf_to_config_argument(config));
return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_SET_DEBOUNCE,
&duration, sizeof(duration));
}
@@ -474,7 +478,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.get_direction = dln2_gpio_get_direction;
dln2->gpio.direction_input = dln2_gpio_direction_input;
dln2->gpio.direction_output = dln2_gpio_direction_output;
- dln2->gpio.set_debounce = dln2_gpio_set_debounce;
+ dln2->gpio.set_config = dln2_gpio_set_config;
platform_set_drvdata(pdev, dln2);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 6193f62c0df4..9c15ee4ef4e9 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -279,6 +279,18 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
return 0;
}
+static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return dwapb_gpio_set_debounce(gc, offset, debounce);
+}
+
static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
{
u32 worked;
@@ -426,7 +438,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
/* Only port A support debounce */
if (pp->idx == 0)
- port->gc.set_debounce = dwapb_gpio_set_debounce;
+ port->gc.set_config = dwapb_gpio_set_config;
if (pp->irq)
dwapb_configure_irqs(gpio, port, pp);
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index d054219e18b9..45d384039e9b 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -291,15 +291,20 @@ static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
};
-static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
- unsigned offset, unsigned debounce)
+static int ep93xx_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
int gpio = chip->base + offset;
int irq = gpio_to_irq(gpio);
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
if (irq < 0)
return -EINVAL;
+ debounce = pinconf_to_config_argument(config);
ep93xx_gpio_int_debounce(irq, debounce ? true : false);
return 0;
@@ -335,7 +340,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
gc->base = bank->base;
if (bank->has_debounce) {
- gc->set_debounce = ep93xx_gpio_set_debounce;
+ gc->set_config = ep93xx_gpio_set_config;
gc->to_irq = ep93xx_gpio_to_irq;
}
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index e8accde62aa7..56bd76c33767 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -131,9 +131,8 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
-static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned offset,
- enum single_ended_mode mode);
+static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config);
#define F7188X_GPIO_BANK(_base, _ngpio, _regbase) \
{ \
@@ -145,7 +144,7 @@ static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
.set = f7188x_gpio_set, \
- .set_single_ended = f7188x_gpio_set_single_ended, \
+ .set_config = f7188x_gpio_set_config, \
.base = _base, \
.ngpio = _ngpio, \
.can_sleep = true, \
@@ -326,17 +325,17 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
superio_exit(sio->addr);
}
-static int f7188x_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned offset,
- enum single_ended_mode mode)
+static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
int err;
+ enum pin_config_param param = pinconf_to_config_param(config);
struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
struct f7188x_sio *sio = bank->data->sio;
u8 data;
- if (mode != LINE_MODE_OPEN_DRAIN &&
- mode != LINE_MODE_PUSH_PULL)
+ if (param != PIN_CONFIG_DRIVE_OPEN_DRAIN &&
+ param != PIN_CONFIG_DRIVE_PUSH_PULL)
return -ENOTSUPP;
err = superio_enter(sio->addr);
@@ -345,7 +344,7 @@ static int f7188x_gpio_set_single_ended(struct gpio_chip *chip,
superio_select(sio->addr, SIO_LD_GPIO);
data = superio_inb(sio->addr, gpio_out_mode(bank->regbase));
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
data &= ~BIT(offset);
else
data |= BIT(offset);
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 218c706359aa..df0ad2cef0d2 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -100,21 +100,21 @@ static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
return 0;
}
-static int lp873x_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned int offset,
- enum single_ended_mode mode)
+static int lp873x_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
{
struct lp873x_gpio *gpio = gpiochip_get_data(gc);
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(gpio->lp873->regmap,
LP873X_REG_GPO_CTRL,
BIT(offset * BITS_PER_GPO +
LP873X_GPO_CTRL_OD),
BIT(offset * BITS_PER_GPO +
LP873X_GPO_CTRL_OD));
- case LINE_MODE_PUSH_PULL:
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(gpio->lp873->regmap,
LP873X_REG_GPO_CTRL,
BIT(offset * BITS_PER_GPO +
@@ -133,7 +133,7 @@ static const struct gpio_chip template_chip = {
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
.set = lp873x_gpio_set,
- .set_single_ended = lp873x_gpio_set_single_ended,
+ .set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index ec8de4190db9..743459d9477d 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -152,11 +152,10 @@ static int max77620_gpio_dir_output(struct gpio_chip *gc, unsigned int offset,
return ret;
}
-static int max77620_gpio_set_debounce(struct gpio_chip *gc,
+static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
unsigned int offset,
unsigned int debounce)
{
- struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
int ret;
@@ -202,21 +201,23 @@ static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
}
-static int max77620_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned int offset,
- enum single_ended_mode mode)
+static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DRV_MASK,
MAX77620_CNFG_GPIO_DRV_OPENDRAIN);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
MAX77620_CNFG_GPIO_DRV_MASK,
MAX77620_CNFG_GPIO_DRV_PUSHPULL);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return max77620_gpio_set_debounce(mgpio, offset,
+ pinconf_to_config_argument(config));
default:
break;
}
@@ -257,9 +258,8 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set_debounce = max77620_gpio_set_debounce;
mgpio->gpio_chip.set = max77620_gpio_set;
- mgpio->gpio_chip.set_single_ended = max77620_gpio_set_single_ended;
+ mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.to_irq = max77620_gpio_to_irq;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a1210e330571..e1037582e34d 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -89,22 +89,18 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
static int men_z127_set_single_ended(struct gpio_chip *gc,
unsigned offset,
- enum single_ended_mode mode)
+ enum pin_config_param param)
{
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- if (mode != LINE_MODE_OPEN_DRAIN &&
- mode != LINE_MODE_PUSH_PULL)
- return -ENOTSUPP;
-
spin_lock(&gc->bgpio_lock);
od_en = readl(priv->reg_base + MEN_Z127_ODER);
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
od_en |= BIT(offset);
else
- /* Implicitly LINE_MODE_PUSH_PULL */
+ /* Implicitly PIN_CONFIG_DRIVE_PUSH_PULL */
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
@@ -113,6 +109,27 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
return 0;
}
+static int men_z127_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return men_z127_set_single_ended(gc, offset, param);
+
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return men_z127_debounce(gc, offset,
+ pinconf_to_config_argument(config));
+
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
@@ -149,8 +166,7 @@ static int men_z127_probe(struct mcb_device *mdev,
if (ret)
goto err_unmap;
- men_z127_gpio->gc.set_debounce = men_z127_debounce;
- men_z127_gpio->gc.set_single_ended = men_z127_set_single_ended;
+ men_z127_gpio->gc.set_config = men_z127_set_config;
ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
if (ret) {
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 69e0f4ace465..f40088d268c1 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -190,6 +190,18 @@ static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
return 0;
}
+static int mrfld_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return mrfld_gpio_set_debounce(chip, offset, debounce);
+}
+
static void mrfld_irq_ack(struct irq_data *d)
{
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
@@ -414,7 +426,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
priv->chip.get = mrfld_gpio_get;
priv->chip.set = mrfld_gpio_set;
priv->chip.get_direction = mrfld_gpio_get_direction;
- priv->chip.set_debounce = mrfld_gpio_set_debounce;
+ priv->chip.set_config = mrfld_gpio_set_config;
priv->chip.base = gpio_base;
priv->chip.ngpio = MRFLD_NGPIO;
priv->chip.can_sleep = false;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 1e8fde8cb803..2292742eac8f 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
return 0;
}
-static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b98ede78c9d8..efc85a279d54 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -974,6 +974,18 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
return 0;
}
+static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return omap_gpio_debounce(chip, offset, debounce);
+}
+
static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_bank *bank;
@@ -1045,7 +1057,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.direction_input = omap_gpio_input;
bank->chip.get = omap_gpio_get;
bank->chip.direction_output = omap_gpio_output;
- bank->chip.set_debounce = omap_gpio_debounce;
+ bank->chip.set_config = omap_gpio_set_config;
bank->chip.set = omap_gpio_set;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index be97101c2c9a..433b45ef332e 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -100,9 +100,8 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
return !(ret & BIT(pos));
}
-static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
+static int tc3589x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -116,22 +115,22 @@ static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
unsigned int pos = offset % 8;
int ret;
- switch(mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
/* Set open drain mode */
ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), 0);
if (ret)
return ret;
/* Enable open drain/source mode */
return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
- case LINE_MODE_OPEN_SOURCE:
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
/* Set open source mode */
ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), BIT(pos));
if (ret)
return ret;
/* Enable open drain/source mode */
return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
/* Disable open drain/source mode */
return tc3589x_set_bits(tc3589x, odereg, BIT(pos), 0);
default:
@@ -148,7 +147,7 @@ static const struct gpio_chip template_chip = {
.direction_output = tc3589x_gpio_direction_output,
.direction_input = tc3589x_gpio_direction_input,
.get_direction = tc3589x_gpio_get_direction,
- .set_single_ended = tc3589x_gpio_set_single_ended,
+ .set_config = tc3589x_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 661b0e34e067..88529d3c06c9 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -238,6 +238,18 @@ static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
return 0;
}
+static int tegra_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return tegra_gpio_set_debounce(chip, offset, debounce);
+}
+
static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -615,7 +627,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tgi);
if (config->debounce_supported)
- tgi->gc.set_debounce = tegra_gpio_set_debounce;
+ tgi->gc.set_config = tegra_gpio_set_config;
tgi->bank_info = devm_kzalloc(&pdev->dev, tgi->bank_count *
sizeof(*tgi->bank_info), GFP_KERNEL);
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 46e6dcc089cb..a379bba57d31 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -139,28 +139,28 @@ static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static int tps65218_gpio_set_single_ended(struct gpio_chip *gc,
- unsigned offset,
- enum single_ended_mode mode)
+static int tps65218_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
{
struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+ enum pin_config_param param = pinconf_to_config_param(config);
switch (offset) {
case 0:
case 2:
/* GPO1 is hardwired to be open drain */
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
return 0;
return -ENOTSUPP;
case 1:
/* GPO2 is push-pull by default, can be set as open drain. */
- if (mode == LINE_MODE_OPEN_DRAIN)
+ if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
return tps65218_clear_bits(tps65218,
TPS65218_REG_CONFIG1,
TPS65218_CONFIG1_GPO2_BUF,
TPS65218_PROTECT_L1);
- if (mode == LINE_MODE_PUSH_PULL)
+ if (param == PIN_CONFIG_DRIVE_PUSH_PULL)
return tps65218_set_bits(tps65218,
TPS65218_REG_CONFIG1,
TPS65218_CONFIG1_GPO2_BUF,
@@ -181,7 +181,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65218_gpio_input,
.get = tps65218_gpio_get,
.set = tps65218_gpio_set,
- .set_single_ended = tps65218_gpio_set_single_ended,
+ .set_config = tps65218_gpio_set_config,
.can_sleep = true,
.ngpio = 3,
.base = -1,
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 4e450121129b..98a6f1fcc561 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -186,23 +186,24 @@ static int vx855gpio_direction_output(struct gpio_chip *gpio,
return 0;
}
-static int vx855gpio_set_single_ended(struct gpio_chip *gpio,
- unsigned int nr,
- enum single_ended_mode mode)
+static int vx855gpio_set_config(struct gpio_chip *gpio, unsigned int nr,
+ unsigned long config)
{
+ enum pin_config_param param = pinconf_to_config_param(config);
+
/* The GPI cannot be single-ended */
if (nr < NR_VX855_GPI)
return -EINVAL;
/* The GPO's are push-pull */
if (nr < NR_VX855_GPInO) {
- if (mode != LINE_MODE_PUSH_PULL)
+ if (param != PIN_CONFIG_DRIVE_PUSH_PULL)
return -ENOTSUPP;
return 0;
}
/* The GPIO's are open drain */
- if (mode != LINE_MODE_OPEN_DRAIN)
+ if (param != PIN_CONFIG_DRIVE_OPEN_DRAIN)
return -ENOTSUPP;
return 0;
@@ -231,7 +232,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
c->set = vx855gpio_set;
- c->set_single_ended = vx855gpio_set_single_ended;
+ c->set_config = vx855gpio_set_config,
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 34baee5b1dd6..97613de5304e 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -202,17 +202,16 @@ static void wcove_gpio_set(struct gpio_chip *chip,
regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
}
-static int wcove_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int gpio,
- enum single_ended_mode mode)
+static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
+ unsigned long config)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
CTLO_DRV_MASK, CTLO_DRV_OD);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
CTLO_DRV_MASK, CTLO_DRV_CMOS);
default:
@@ -411,7 +410,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
wg->chip.set = wcove_gpio_set;
- wg->chip.set_single_ended = wcove_gpio_set_single_ended,
+ wg->chip.set_config = wcove_gpio_set_config,
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
wg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 533707f943f4..00e3839b3f96 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -101,11 +101,9 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
WM831X_IRQ_GPIO_1 + offset);
}
-static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+static int wm831x_gpio_set_debounce(struct wm831x *wm831x, unsigned offset,
unsigned debounce)
{
- struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
- struct wm831x *wm831x = wm831x_gpio->wm831x;
int reg = WM831X_GPIO1_CONTROL + offset;
int ret, fn;
@@ -132,21 +130,23 @@ static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}
-static int wm831x_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
+static int wm831x_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
{
struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int reg = WM831X_GPIO1_CONTROL + offset;
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return wm831x_set_bits(wm831x, reg,
WM831X_GPN_OD_MASK, WM831X_GPN_OD);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return wm831x_set_bits(wm831x, reg,
WM831X_GPN_OD_MASK, 0);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return wm831x_gpio_set_debounce(wm831x, offset,
+ pinconf_to_config_argument(config));
default:
break;
}
@@ -255,8 +255,7 @@ static const struct gpio_chip template_chip = {
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
- .set_debounce = wm831x_gpio_set_debounce,
- .set_single_ended = wm831x_set_single_ended,
+ .set_config = wm831x_set_config,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 68410fda6138..1e35756ac55b 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -103,19 +103,18 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
-static int wm8994_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
+static int wm8994_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
{
struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
- switch (mode) {
- case LINE_MODE_OPEN_DRAIN:
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
WM8994_GPN_OP_CFG_MASK,
WM8994_GPN_OP_CFG);
- case LINE_MODE_PUSH_PULL:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
WM8994_GPN_OP_CFG_MASK, 0);
default:
@@ -257,7 +256,7 @@ static const struct gpio_chip template_chip = {
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
.set = wm8994_gpio_set,
- .set_single_ended = wm8994_gpio_set_single_ended,
+ .set_config = wm8994_gpio_set_config,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
.can_sleep = true,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f4c26c7826cd..d0478f1853db 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
+ gpiochip_free_hogs(chip);
/* Numb the device, cancelling all outstanding operations */
gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
- gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
/*
* We accept no more calls into the driver from this point, so
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
* @gpiochip: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
* need to be open coded.
*/
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ bool nested,
+ struct lock_class_key *lock_key)
{
struct device_node *of_node;
bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
return 0;
}
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
@@ -1876,6 +1876,19 @@ void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
+/**
+ * gpiochip_generic_config() - apply configuration for a pin
+ * @chip: the gpiochip owning the GPIO
+ * @offset: the offset of the GPIO to apply the configuration
+ * @config: the configuration to be applied
+ */
+int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ return pinctrl_gpio_set_config(chip->gpiodev->base + offset, config);
+}
+EXPORT_SYMBOL_GPL(gpiochip_generic_config);
+
#ifdef CONFIG_PINCTRL
/**
@@ -2264,6 +2277,14 @@ int gpiod_direction_input(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);
+static int gpio_set_drive_single_ended(struct gpio_chip *gc, unsigned offset,
+ enum pin_config_param mode)
+{
+ unsigned long config = { PIN_CONF_PACKED(mode, 0) };
+
+ return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
+}
+
static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
@@ -2280,32 +2301,25 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
- if (gc->set_single_ended) {
- ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
- LINE_MODE_OPEN_DRAIN);
- if (!ret)
- goto set_output_value;
- }
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_DRAIN);
+ if (!ret)
+ goto set_output_value;
/* Emulate open drain by not actively driving the line high */
if (val)
return gpiod_direction_input(desc);
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- if (gc->set_single_ended) {
- ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
- LINE_MODE_OPEN_SOURCE);
- if (!ret)
- goto set_output_value;
- }
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_SOURCE);
+ if (!ret)
+ goto set_output_value;
/* Emulate open source by not actively driving the line low */
if (!val)
return gpiod_direction_input(desc);
} else {
- /* Make sure to disable open drain/source hardware, if any */
- if (gc->set_single_ended)
- gc->set_single_ended(gc,
- gpio_chip_hwgpio(desc),
- LINE_MODE_PUSH_PULL);
+ gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_PUSH_PULL);
}
set_output_value:
@@ -2376,17 +2390,19 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
struct gpio_chip *chip;
+ unsigned long config;
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
- if (!chip->set || !chip->set_debounce) {
+ if (!chip->set || !chip->set_config) {
gpiod_dbg(desc,
- "%s: missing set() or set_debounce() operations\n",
+ "%s: missing set() or set_config() operations\n",
__func__);
return -ENOTSUPP;
}
- return chip->set_debounce(chip, gpio_chip_hwgpio(desc), debounce);
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
+ return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 9ada56c16a58..4c851fde1e82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
break;
+ case CHIP_POLARIS12:
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28..41e41f90265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 60bd4afe45c8..fe3bb94fe58d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
"STONEY",
"POLARIS10",
"POLARIS11",
+ "POLARIS12",
"LAST",
};
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8cb937b2bfcc..2534adaebe30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ /* Polaris12 */
+ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index fc592c2b0e16..95a568df8551 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a81dfaeeb8c0..1d564beb0fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -65,6 +65,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 69b66b9e7f57..8fec802d3908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -52,6 +52,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v10_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v10_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index b3d62b909f43..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
polaris11_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
num_crtc = 6;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
num_crtc = 5;
break;
default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
adev->mode_info.audio.num_pins = 6;
break;
default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
int pll;
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v11_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v11_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v11_0_show_cursor(crtc);
@@ -2636,7 +2637,6 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
@@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
@@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle)
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ int w = amdgpu_crtc->cursor_width;
+
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
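
Reviewer note: the CUR_SIZE value that the DCE 6/8/11 variants now program from their cursor_move_locked() helpers is simply the cursor width and height, each reduced by one, packed into the upper and lower half-words. A minimal userspace-style sketch of that packing (WREG32 and the mm* register offsets are the driver's own helpers and are deliberately omitted here):

#include <stdint.h>
#include <stdio.h>

/* Pack a cursor rectangle the way CUR_SIZE expects it:
 * bits 31:16 hold (width - 1), bits 15:0 hold (height - 1). */
static uint32_t cur_size_value(uint32_t width, uint32_t height)
{
	return ((width - 1) << 16) | (height - 1);
}

int main(void)
{
	/* A 64x64 cursor encodes as 0x003f003f. */
	printf("0x%08x\n", cur_size_value(64, 64));
	return 0;
}
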
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e82ceb7..e9a176891e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- kfree(amdgpu_encoder->enc_priv);
drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
+ kfree(encoder);
}
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
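
Reviewer note: the virtual-DCE destroy path can kfree() the drm_encoder pointer directly only because struct amdgpu_encoder embeds its drm_encoder as the first member, so the two pointers share the same address; dropping the enc_priv kfree also relies on virtual encoders never allocating that field. A small sketch of the embedding property this depends on (the struct names below are illustrative stand-ins, not the driver's definitions):

#include <stdio.h>
#include <stdlib.h>

struct base_encoder { int id; };

/* Wrapper with the base object as its first member, mirroring how the
 * amdgpu encoder wraps the DRM encoder. */
struct wrapped_encoder {
	struct base_encoder base;	/* must stay first for the shortcut below */
	void *private_data;
};

int main(void)
{
	struct wrapped_encoder *w = calloc(1, sizeof(*w));
	struct base_encoder *b = &w->base;

	/* 'base' sits at offset 0, so the base pointer and the wrapper
	 * pointer are the same address -- the property kfree(encoder)
	 * relies on in the hunk above. */
	printf("same address: %d\n", (void *)w == (void *)b);
	free(w);
	return 0;
}
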
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d0ec00986f38..373374164bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_STONEY:
chip_name = "stoney";
break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
*rconf1 |= 0x0;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
@@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
cz_enable_cp_power_gating(adev, true);
else
cz_enable_cp_power_gating(adev, false);
- } else if (adev->asic_type == CHIP_POLARIS11) {
+ } else if ((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
@@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10) {
+ adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
tmp &= ~0x3;
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x0000002A);
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x00000000);
break;
@@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
(adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS10)) {
+ (adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
mqd->cp_hqd_persistent_state = tmp;
if (adev->asic_type == CHIP_STONEY ||
adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10) {
+ adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if (adev->asic_type == CHIP_POLARIS11)
+ if ((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
amdgpu_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_SMC,
@@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e63d4a..e2b0b1646f99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
const char *chip_name;
char fw_name[30];
int err;
+ bool is_58_fw = false;
DRM_DEBUG("\n");
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ /* this memory configuration requires special firmware */
+ if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ is_58_fw = true;
+
+ if (is_58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
- ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
- VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+ ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ gmc_v6_0_set_fault_enable_default(adev, false);
+ else
+ gmc_v6_0_set_fault_enable_default(adev, true);
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v6_0_sw_init(void *handle)
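
Reviewer note: gmc_v6_0 now keys the MC firmware file off the memory-configuration field in MC_SEQ_MISC0 rather than the chip name alone: when the top byte reads 0x58, every SI part loads radeon/si58_mc.bin. A hedged sketch of that selection logic with the register read stubbed out (the example register value is made up):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for RREG32(mmMC_SEQ_MISC0); the real code reads the MC register. */
static uint32_t read_mc_seq_misc0(void)
{
	return 0x58000000;	/* example value for the special memory config */
}

static void pick_mc_firmware(char *fw_name, size_t len, const char *chip_name)
{
	/* The 0x58 memory configuration needs its own firmware image,
	 * independent of which SI chip is installed. */
	if (((read_mc_seq_misc0() & 0xff000000) >> 24) == 0x58)
		snprintf(fw_name, len, "radeon/si58_mc.bin");
	else
		snprintf(fw_name, len, "radeon/%s_mc.bin", chip_name);
}

int main(void)
{
	char fw_name[30];

	pick_mc_firmware(fw_name, sizeof(fw_name), "oland");
	printf("%s\n", fw_name);
	return 0;
}
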
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 0daac3a5be79..476bc9f1954b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1170a64a3184..034ace79ed49 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 6c65a1a2de79..6e150db8f380 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,7 +56,6 @@
#define BIOS_SCRATCH_4 0x5cd
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -3488,30 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (adev->asic_type == CHIP_VERDE) {
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6820) ||
- (adev->pdev->device == 0x6821) ||
- (adev->pdev->device == 0x6822) ||
- (adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682A) ||
- (adev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- } else if (adev->asic_type == CHIP_OLAND) {
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
@@ -3520,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
- max_mclk = 80000;
}
}
/* Apply dpm quirks */
@@ -7687,50 +7662,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
chip_name = "tahiti";
break;
case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811) ||
- (adev->pdev->device == 0x6816) ||
- (adev->pdev->device == 0x6817) ||
- (adev->pdev->device == 0x6806))
+ if ((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811)))
chip_name = "pitcairn_k";
else
chip_name = "pitcairn";
break;
case CHIP_VERDE:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6820) ||
- (adev->pdev->device == 0x6821) ||
- (adev->pdev->device == 0x6822) ||
- (adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682A) ||
- (adev->pdev->device == 0x682B))
+ if (((adev->pdev->device == 0x6820) &&
+ ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83))) ||
+ ((adev->pdev->device == 0x6821) &&
+ ((adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87))) ||
+ ((adev->pdev->revision == 0x87) &&
+ ((adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682b))))
chip_name = "verde_k";
else
chip_name = "verde";
break;
case CHIP_OLAND:
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605))
+ if (((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6600) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605) ||
+ (adev->pdev->device == 0x6610))) ||
+ ((adev->pdev->revision == 0x83) &&
+ (adev->pdev->device == 0x6610)))
chip_name = "oland_k";
else
chip_name = "oland";
break;
case CHIP_HAINAN:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6664) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))
+ if (((adev->pdev->revision == 0x81) &&
+ (adev->pdev->device == 0x6660)) ||
+ ((adev->pdev->revision == 0x83) &&
+ ((adev->pdev->device == 0x6660) ||
+ (adev->pdev->device == 0x6663) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
+ else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665))
+ chip_name = "banks_k_2";
else
chip_name = "hainan";
break;
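
Reviewer note: the reworked si_dpm firmware selection replaces flat OR-lists of PCI revisions and device IDs with explicit (device, revision) pairings, so only the boards that actually need the *_k or banks_k_2 SMC images get them. One hedged way to express the same kind of matching is a small lookup table; the IDs below are copied from the Hainan hunk, while the struct and helper are illustrative, not the driver's code:

#include <stdint.h>
#include <stdio.h>

struct smc_quirk {
	uint16_t device;
	uint8_t revision;
	const char *fw;
};

/* (device, revision) pairs that need a non-default SMC image. */
static const struct smc_quirk hainan_quirks[] = {
	{ 0x6660, 0x81, "hainan_k" },
	{ 0x6660, 0x83, "hainan_k" },
	{ 0x6663, 0x83, "hainan_k" },
	{ 0x6665, 0x83, "hainan_k" },
	{ 0x6667, 0x83, "hainan_k" },
	{ 0x6665, 0xc3, "banks_k_2" },
};

static const char *hainan_smc_name(uint16_t device, uint8_t revision)
{
	for (size_t i = 0; i < sizeof(hainan_quirks) / sizeof(hainan_quirks[0]); i++)
		if (hainan_quirks[i].device == device &&
		    hainan_quirks[i].revision == revision)
			return hainan_quirks[i].fw;
	return "hainan";	/* default image */
}

int main(void)
{
	printf("%s\n", hainan_smc_name(0x6665, 0xc3));
	return 0;
}
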
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4d862a..7fb9137dd89b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
#include "smu/smu_7_0_1_sh_mask.h"
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+ bool sw_mode);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
return r;
}
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v4_2_hw_init - start and test UVD block
*
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;
- uvd_v4_2_init_cg(adev);
- uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;
-
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;
+ WREG32(mmUVD_CGC_GATE, 0);
+ uvd_v4_2_set_dcm(adev, true);
+
uvd_v4_2_mc_resume(adev);
/* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ uvd_v4_2_set_dcm(adev, false);
}
/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
- bool hw_mode = true;
-
- if (hw_mode) {
- uvd_v4_2_set_dcm(adev, false);
- } else {
- u32 tmp = RREG32(mmUVD_CGC_CTRL);
- tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, tmp);
- }
-}
-
static bool uvd_v4_2_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
- bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
- if (state == AMD_CG_STATE_GATE)
- gate = true;
-
- uvd_v4_2_enable_mgcg(adev, gate);
-
return 0;
}
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
-
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a79e283590fb..6de6becce745 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- static int curstate = -1;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (curstate == state)
- return 0;
-
- curstate = state;
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6b3293a1c7b8..37ca685e5a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
+
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+ | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- data &= ~0xffc00000;
+ data &= ~0x3ff;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -320,11 +327,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
- /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
+ /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11))
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
return AMDGPU_VCE_HARVEST_VCE1;
/* Tonga and CZ are dual or single pipe */
@@ -585,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
* VCE team suggest use bit 3--bit 6 for busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
@@ -733,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -752,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
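
Reviewer note: the VCE 3.0 change stops poking the VCE_INSTANCE register field directly and instead writes GRBM_GFX_INDEX with a value built by GET_VCE_INSTANCE(), which shifts the instance index into place and always sets the "all pipes" bits, then restores the 0xE0000000 broadcast default afterwards. A small sketch of how that value is composed, with the constants copied from the hunk above and the actual register write omitted:

#include <stdint.h>
#include <stdio.h>

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
#define mmGRBM_GFX_INDEX_DEFAULT		0xE0000000

/* Select one VCE instance while still addressing all of its pipes. */
#define GET_VCE_INSTANCE(i)	(((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT) | \
				 GRBM_GFX_INDEX__VCE_ALL_PIPE)

int main(void)
{
	/* Instance 0 -> 0x00000007, instance 1 -> 0x00000017,
	 * then the broadcast default is restored. */
	printf("0x%08x 0x%08x 0x%08x\n",
	       (uint32_t)GET_VCE_INSTANCE(0),
	       (uint32_t)GET_VCE_INSTANCE(1),
	       (uint32_t)mmGRBM_GFX_INDEX_DEFAULT);
	return 0;
}
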
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index bf088d6d9bf1..c2ac54f11341 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
/*
* Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
default:
break;
}
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
case CHIP_TONGA:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
asic_register_table = cz_allowed_read_registers;
@@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
+ case CHIP_POLARIS12:
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x64;
+ break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
@@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
case CHIP_TONGA:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;
@@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
amdgpu_ip_block_add(adev, &vi_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c02469ada9f1..85f358764bbc 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,7 +23,7 @@
#ifndef __AMD_SHARED_H__
#define __AMD_SHARED_H__
-#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
/*
* Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
CHIP_STONEY,
CHIP_POLARIS10,
CHIP_POLARIS11,
+ CHIP_POLARIS12,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5f54c9..6bb79c94cb9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f259a147..0fb4e8c8f5e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*EPR# 419220 -HW limitation to to */
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
-
+ /*Program HardMin based on the vce_arbiter.ecclk */
+ if (hwmgr->vce_arbiter.ecclk == 0) {
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
+ /* disable ECLK DPM 0. Otherwise VCE could hang if
+ * switching SCLK from DPM 0 to 6/7 */
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkSoftMin, 1);
+ } else {
+ cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
+ }
}
return 0;
}
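
Reviewer note: the cz_hwmgr change splits the non-flag path in two. When the VCE arbiter has not requested any ECLK (ecclk == 0), the hard minimum is programmed to 0 and the soft minimum is raised to level 1 so ECLK DPM 0 is never used; otherwise the previous hard-minimum programming is kept. A hedged sketch of that decision with the SMU messaging reduced to a printing stub:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for smum_send_msg_to_smc_with_parameter(). */
static void send_smu_msg(const char *msg, uint32_t param)
{
	printf("%s(%u)\n", msg, param);
}

static void program_eclk_minimums(uint32_t arbiter_ecclk, uint32_t hard_min_level)
{
	if (arbiter_ecclk == 0) {
		/* No ECLK requested: hard minimum 0, but keep the soft
		 * minimum at level 1 so VCE never sits in ECLK DPM 0. */
		send_smu_msg("SetEclkHardMin", 0);
		send_smu_msg("SetEclkSoftMin", 1);
	} else {
		/* Otherwise program the hard minimum from the arbiter. */
		send_smu_msg("SetEclkHardMin", hard_min_level);
	}
}

int main(void)
{
	program_eclk_minimums(0, 0);
	program_eclk_minimums(40000, 3);
	return 0;
}
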
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index dc6700aee18f..b03606405a53 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- if (hwmgr->chip_id == CHIP_POLARIS11)
+ if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 26477f0f09dc..6cd1287a7a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index e5812aa456f3..6e618aa20719 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
polaris10_smum_init(smumgr);
break;
default:
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d2c8f5..7abda94fc2cf 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
+ bool DisableP2A;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c6421db62..533e762d036d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
} else
*need_post = false;
+ /* Check P2A Access */
+ ast->DisableP2A = true;
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF)
+ ast->DisableP2A = false;
+
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
- ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
- ast->support_wide_screen = true;
+ if (ast->DisableP2A == false) {
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x1207c);
+ data &= 0x300;
+ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
+ }
}
break;
}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
-
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
- if (pci_channel_offline(dev->pdev))
- return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
+ if (ast->DisableP2A)
+ {
ast->dram_bus_width = 16;
+ ast->dram_type = AST_DRAM_1Gx16;
+ ast->mclk = 396;
+ }
else
- ast->dram_bus_width = 32;
+ {
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
- if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (data & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
+ div = 0x4;
break;
- case 8:
- if (data & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
+ case 2:
+ case 1:
+ div = 0x2;
break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
+ default:
+ div = 0x1;
break;
}
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
-
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
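
Reviewer note: with P2A access disabled, ast_get_dram_info() can no longer probe the SCU, so it falls back to conservative fixed values (16-bit bus, 1G x16 DRAM, 396 MHz MCLK) and only performs the register-based detection when DisableP2A is false. A minimal sketch of that fallback shape; the struct, the probe stub and its return values are illustrative stand-ins, not the driver's code:

#include <stdbool.h>
#include <stdio.h>

struct dram_info {
	int bus_width;
	const char *type;
	int mclk;
};

/* Stand-in for the register-based probe done when P2A is usable. */
static struct dram_info probe_dram_via_p2a(void)
{
	return (struct dram_info){ 32, "2Gx16", 408 };
}

static struct dram_info get_dram_info(bool disable_p2a)
{
	if (disable_p2a) {
		/* No P2A window: assume safe defaults instead of reading
		 * the SCU, mirroring the DisableP2A branch above. */
		return (struct dram_info){ 16, "1Gx16", 396 };
	}
	return probe_dram_via_p2a();
}

int main(void)
{
	struct dram_info info = get_dram_info(true);

	printf("%d-bit %s @ %d MHz\n", info.bus_width, info.type, info.mclk);
	return 0;
}
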
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d92b99..5331ee1df086 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
ast_open_key(ast);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300 || ast->chip == AST2400)
- ast_init_dram_2300(dev);
- else
- ast_init_dram_reg(dev);
+ if (ast->DisableP2A == false)
+ {
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(dev);
+ }
+ else
+ {
+ if (ast->tx_chip_type != AST_TX_NONE)
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ }
}
/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf8786c24..18eefdcbf1ba 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}
+ phy_power_off(dp->phy);
+ pm_runtime_put(dev);
+
return 0;
err_disable_pm_runtime:
+
+ phy_power_off(dp->phy);
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c161dfae..7f4cc6e172ab 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
the modesetting userspace X.org driver.
+
+ Cirrus is obsolete; the hardware was designed in the 1990s
+ and can't keep up with today's needs. More background:
+ https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+ Better alternatives are:
+ - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+ - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+ - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 60697482b94c..50f5cf7b69d1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
- struct drm_crtc *crtc, s64 __user *fence_ptr)
+ struct drm_crtc *crtc, s32 __user *fence_ptr)
{
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- s64 __user *fence_ptr;
+ s32 __user *fence_ptr;
fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->prop_out_fence_ptr) {
- s64 __user *fence_ptr = u64_to_user_ptr(val);
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
if (!fence_ptr)
return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
*/
struct drm_out_fence_state {
- s64 __user *out_fence_ptr;
+ s32 __user *out_fence_ptr;
struct sync_file *sync_file;
int fd;
};
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- u64 __user *fence_ptr;
+ s32 __user *fence_ptr;
fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
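
Reviewer note: the drm_atomic type change matters because OUT_FENCE_PTR is a user pointer to the location where the kernel later stores a sync_file file descriptor, and an fd is a 32-bit int; keeping the pointer typed as s64 __user * would make the eventual put_user() write 8 bytes into a 4-byte slot. A userspace-sized illustration of why the pointee type has to match the fd (plain C, no __user annotations):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The out-fence slot userspace provides is a file descriptor,
	 * i.e. a 32-bit int. */
	int32_t out_fence_fd;

	/* A pointer typed as s64 would make the kernel copy 8 bytes into
	 * this 4-byte location; typing it s32 keeps the sizes matched. */
	printf("fd slot: %zu bytes, s64 store: %zu bytes\n",
	       sizeof(out_fence_fd), sizeof(int64_t));
	return 0;
}
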
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 583f47f27b36..34f757bcabae 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1259,8 +1259,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret)
+ if (ret) {
+ drm_atomic_helper_cleanup_planes(dev, state);
return ret;
+ }
}
/*
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..e6b19bc9021a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL;
mode->type |= DRM_MODE_TYPE_USERDEF;
+ /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+ if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f037be7..cf8f0128c161 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}
if (dev->mode_config.delayed_event) {
+ /*
+ * FIXME:
+ *
+ * Use short (1s) delay to handle the initial delayed event.
+ * This delay should not be needed, but Optimus/nouveau will
+ * fail in a mysterious way if the delayed event is handled as
+ * soon as possible like it is done in
+ * drm_helper_probe_single_connector_modes() in case the poll
+ * was enabled before.
+ */
poll = true;
- delay = 0;
+ delay = HZ;
}
if (poll)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96e8f08..fe0e85b41310 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
+ /*
+ * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+ * drm_mm into giving out a low IOVA after address space
+ * rollover. This needs a proper fix.
+ */
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
- DRM_MM_SEARCH_DEFAULT);
+ mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
if (ret != -ENOSPC)
break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f3117fe8..75eeb831ed6a 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
BIT_CLKS_ENABLED,
BIT_IRQS_ENABLED,
BIT_WIN_UPDATED,
- BIT_SUSPENDED
+ BIT_SUSPENDED,
+ BIT_REQUEST_UPDATE
};
struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
m->crtc_vsync_end = m->crtc_vsync_start + 1;
}
- decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
- /* enable clock gate */
- val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
- writel(val, ctx->addr + DECON_CMU);
-
if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
decon_setup_trigger(ctx);
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
return;
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
- /* standalone update */
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+ if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);
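
Reviewer note: the DECON change only issues the standalone update when some plane actually changed: decon_update_plane()/decon_disable_plane() set BIT_REQUEST_UPDATE, and the flush handler consumes it with test_and_clear_bit(). A single-threaded sketch of that set-then-consume pattern, with a plain flag standing in for the kernel's atomic bitops:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the BIT_REQUEST_UPDATE bit in ctx->flags. */
static bool request_update;

static void update_plane(void)
{
	/* Any plane change records that a standalone update is needed. */
	request_update = true;
}

static void atomic_flush(void)
{
	/* Consume the flag: only program STANDALONE_UPDATE_F when some
	 * plane was actually touched since the last flush. */
	if (request_update) {
		request_update = false;
		printf("standalone update issued\n");
	} else {
		printf("nothing to update\n");
	}
}

int main(void)
{
	atomic_flush();		/* no plane changed: skipped */
	update_plane();
	atomic_flush();		/* plane changed: update issued */
	return 0;
}
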
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..f7bce8603958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
#include "i915_drv.h"
#include "gvt.h"
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
POSTING_READ(fence_reg_lo);
}
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+ intel_vgpu_write_fence(vgpu, i, 0);
+}
+
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
+ _clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
- intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
- intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
+ _clear_vgpu_fence(vgpu);
+
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
}
/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_runtime_pm_get(dev_priv);
+ _clear_vgpu_fence(vgpu);
+ intel_runtime_pm_put(dev_priv);
+}
+
+/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index db516382a4d4..4a6a2ed65732 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
+ memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return ret;
}
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
return 0;
}
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ bool primary)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+ /* Show guest that there isn't any stolen memory.*/
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+ /*
+ * Clear the bar upper 32bit and let guest to assign the new value
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+ u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+ bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+ if (cmd & PCI_COMMAND_MEMORY) {
+ trap_gttmmio(vgpu, false);
+ map_aperture(vgpu, false);
+ }
+
+ /**
+ * Currently we only do such reset when vGPU is not
+ * owned by any VM, so we simply restore entire cfg
+ * space to default value.
+ */
+ intel_vgpu_init_cfg_space(vgpu, primary);
+}
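
Reviewer note: the bounds check in intel_vgpu_emulate_cfg_write() was off by one. A 4-byte write to the last dword of the configuration space has offset + bytes exactly equal to the space size, which is legal, so the rejection condition must use '>' rather than '>='. A tiny sketch of the corrected check; the size constant below is illustrative, not the GVT one:

#include <stdbool.h>
#include <stdio.h>

#define CFG_SPACE_SZ	256	/* illustrative size, not INTEL_GVT_MAX_CFG_SPACE_SZ */

/* Accept writes that end exactly at the end of the space. */
static bool cfg_write_in_bounds(unsigned int offset, unsigned int bytes)
{
	return offset + bytes <= CFG_SPACE_SZ;
}

int main(void)
{
	/* 4-byte write to the last dword: offset 252, ends at 256. */
	printf("%d %d\n",
	       cfg_write_in_bounds(252, 4),	/* 1: allowed */
	       cfg_write_in_bounds(253, 4));	/* 0: rejected */
	return 0;
}
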
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092c70e8..e4563984cb1e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@ struct parser_exec_state {
(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (bypass_batch_buffer_scan)
- return 0;
-
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..34083731669d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
- unsigned long add, int gmadr_bytes)
-{
- if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
- return -1;
-
- *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
- BATCH_BUFFER_ADDR_MASK;
- if (gmadr_bytes == 8) {
- *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
- add & BATCH_BUFFER_ADDR_HIGH_MASK;
- }
-
- return 0;
-}
-
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
- int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_shadow_bb_entry *entry_obj;
/* pin the gem object to ggtt */
- if (!list_empty(&workload->shadow_bb)) {
- struct intel_shadow_bb_entry *entry_obj =
- list_first_entry(&workload->shadow_bb,
- struct intel_shadow_bb_entry,
- list);
- struct intel_shadow_bb_entry *temp;
+ list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+ struct i915_vma *vma;
- list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
- list) {
- struct i915_vma *vma;
-
- vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
- 4, 0);
- if (IS_ERR(vma)) {
- gvt_err("Cannot pin\n");
- return;
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- /* update the relocate gma with shadow batch buffer*/
- set_gma_to_bb_cmd(entry_obj,
- i915_ggtt_offset(vma),
- gmadr_bytes);
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
}
+
+ /* FIXME: we are not tracking our pinned VMA leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ /* update the relocate gma with shadow batch buffer*/
+ entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+ if (gmadr_bytes == 8)
+ entry_obj->bb_start_cmd_va[2] = 0;
}
}
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}
- vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7eaaf1c9ed2b..47dec4acf7ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
- u64 pte;
-#ifdef readq
- pte = readq(addr);
-#else
- pte = ioread32(addr);
- pte |= (u64)ioread32(addr + 4) << 32;
-#endif
- return pte;
+ return readq(addr);
}
static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-#ifdef writeq
writeq(pte, addr);
-#else
- iowrite32((u32)pte, addr);
- iowrite32(pte >> 32, addr + 4);
-#endif
+
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
info->gtt_entry_size;
mem = kzalloc(mm->has_shadow_page_table ?
mm->page_table_entry_size * 2
- : mm->page_table_entry_size,
- GFP_ATOMIC);
+ : mm->page_table_entry_size, GFP_KERNEL);
if (!mem)
return -ENOMEM;
mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *mm;
int ret;
- mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
- struct page *scratch_pt;
+ void *scratch_pt;
unsigned long mfn;
int i;
- void *p;
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
- scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
gvt_err("fail to allocate scratch page\n");
return -ENOMEM;
}
- p = kmap_atomic(scratch_pt);
- mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
- kunmap_atomic(p);
- __free_page(scratch_pt);
+ gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+ free_page((unsigned long)scratch_pt);
return -EFAULT;
}
gtt->scratch_pt[type].page_mfn = mfn;
- gtt->scratch_pt[type].page = scratch_pt;
+ gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, mfn);
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
* scratch_pt[type] indicate the scratch pt/scratch page used by the
* 'type' pt.
* e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
- * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+ * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
* is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
*/
if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
se.val64 |= PPAT_CACHED_INDEX;
for (i = 0; i < page_entry_num; i++)
- ops->set_entry(p, &se, i, false, 0, vgpu);
+ ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
}
- kunmap_atomic(p);
-
return 0;
}
@@ -1998,6 +1981,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
+ intel_vgpu_reset_ggtt(vgpu);
+
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
+ void *page;
gvt_dbg_core("init gtt\n");
@@ -2218,6 +2204,20 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}
+ page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!page) {
+ gvt_err("fail to allocate scratch ggtt page\n");
+ return -ENOMEM;
+ }
+ gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+
+ gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
+ if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+ gvt_err("fail to translate scratch ggtt page\n");
+ __free_page(gvt->gtt.scratch_ggtt_page);
+ return -EFAULT;
+ }
+
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
@@ -2239,6 +2239,68 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
+ __free_page(gvt->gtt.scratch_ggtt_page);
+
if (enable_out_of_sync)
clean_spt_oos(gvt);
}
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ u32 index;
+ u32 offset;
+ u32 num_entries;
+ struct intel_gvt_gtt_entry e;
+
+ memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+ e.type = GTT_TYPE_GGTT_PTE;
+ ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+ e.val64 |= _PAGE_PRESENT;
+
+ index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+ num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+ for (offset = 0; offset < num_entries; offset++)
+ ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+ index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+ num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+ for (offset = 0; offset < num_entries; offset++)
+ ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
+
+/**
+ * intel_vgpu_reset_gtt - reset the all GTT related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from the vfio core to reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+ int i;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ if (!dmlr)
+ return;
+
+ intel_vgpu_reset_ggtt(vgpu);
+
+ /* clear scratch page for security */
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL)
+ memset(page_address(vgpu->gtt.scratch_pt[i].page),
+ 0, PAGE_SIZE);
+ }
+}
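
The new intel_vgpu_reset_ggtt() rewrites every GGTT slot owned by the vGPU so it points at a single shared scratch page, which keeps stale guest mappings from referencing host memory after a reset. A toy userspace model of that loop follows; it is a sketch only, with made-up ranges and a made-up scratch MFN, not driver code.

    /* Toy model of pointing a vGPU's GGTT ranges at one scratch page. */
    #include <stdio.h>

    #define NUM_ENTRIES 8
    #define SCRATCH_MFN 0xabcd0UL   /* illustrative scratch page frame */

    static unsigned long ggtt[NUM_ENTRIES];

    static void reset_ggtt_range(unsigned int base, unsigned int count)
    {
        for (unsigned int i = 0; i < count; i++)
            ggtt[base + i] = (SCRATCH_MFN << 12) | 0x1;  /* pfn + present bit */
    }

    int main(void)
    {
        /* Pretend entries 0-3 are the aperture range, 4-7 the hidden range. */
        reset_ggtt_range(0, 4);
        reset_ggtt_range(4, 4);

        for (int i = 0; i < NUM_ENTRIES; i++)
            printf("entry %d -> %#lx\n", i, ggtt[i]);
        return 0;
    }
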
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d250013bc37b..f88eb5e89bea 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
+
+ struct page *scratch_ggtt_page;
+ unsigned long scratch_ggtt_mfn;
};
enum {
@@ -202,8 +205,10 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_clean_vgpu_types(gvt);
+ idr_destroy(&gvt->vgpu_idr);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("init gvt device\n");
+ idr_init(&gvt->vgpu_idr);
+
mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
- return ret;
+ goto out_clean_idr;
ret = intel_gvt_load_firmware(gvt);
if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+ idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ad0e9364ee70..e227caf5859e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -175,6 +175,7 @@ struct intel_vgpu {
struct notifier_block group_notifier;
struct kvm *kvm;
struct work_struct release_work;
+ atomic_t released;
} vdev;
#endif
};
@@ -322,6 +323,7 @@ struct intel_vgpu_creation_params {
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
@@ -374,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -410,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
@@ -423,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
struct intel_gvt_ops {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..ab2ea157da4c 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
- void *read, void *write)
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
default:
/*should not hit here*/
gvt_err("invalid forcewake offset 0x%x\n", offset);
- return 1;
+ return -EINVAL;
}
} else {
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes, unsigned long bitmap)
-{
- struct intel_gvt_workload_scheduler *scheduler =
- &vgpu->gvt->scheduler;
-
- vgpu->resetting = true;
-
- intel_vgpu_stop_schedule(vgpu);
- /*
- * The current_vgpu will set to NULL after stopping the
- * scheduler when the reset is triggered by current vgpu.
- */
- if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&vgpu->gvt->lock);
- intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&vgpu->gvt->lock);
- }
-
- intel_vgpu_reset_execlist(vgpu, bitmap);
-
- /* full GPU reset */
- if (bitmap == 0xff) {
- mutex_unlock(&vgpu->gvt->lock);
- intel_vgpu_clean_gtt(vgpu);
- mutex_lock(&vgpu->gvt->lock);
- setup_vgpu_mmio(vgpu);
- populate_pvinfo_page(vgpu);
- intel_vgpu_init_gtt(vgpu);
- }
-
- vgpu->resetting = false;
-
- return 0;
-}
-
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
+ void *p_data, unsigned int bytes)
{
+ unsigned int engine_mask = 0;
u32 data;
- u64 bitmap = 0;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
- bitmap = 0xff;
- }
- if (data & GEN6_GRDOM_RENDER) {
- gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- bitmap |= (1 << RCS);
- }
- if (data & GEN6_GRDOM_MEDIA) {
- gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- bitmap |= (1 << VCS);
- }
- if (data & GEN6_GRDOM_BLT) {
- gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- bitmap |= (1 << BCS);
- }
- if (data & GEN6_GRDOM_VECS) {
- gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- bitmap |= (1 << VECS);
- }
- if (data & GEN8_GRDOM_MEDIA2) {
- gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- bitmap |= (1 << VCS2);
+ engine_mask = ALL_ENGINES;
+ } else {
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ engine_mask |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ engine_mask |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ engine_mask |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ engine_mask |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ engine_mask |= (1 << VCS2);
+ }
}
- return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+ intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+ return 0;
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- int rc = 0;
unsigned int id = 0;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
id = VECS;
break;
default:
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);
- return rc;
+ return 0;
}
static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 4dd6722a7339..3f656e3a6e5a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -114,12 +114,15 @@ out:
static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct gvt_dma *entry;
+ kvm_pfn_t pfn;
mutex_lock(&vgpu->vdev.cache_lock);
+
entry = __gvt_cache_find(vgpu, gfn);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ pfn = (entry == NULL) ? 0 : entry->pfn;
- return entry == NULL ? 0 : entry->pfn;
+ mutex_unlock(&vgpu->vdev.cache_lock);
+ return pfn;
}
static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -166,7 +169,7 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct device *dev = &vgpu->vdev.mdev->dev;
+ struct device *dev = mdev_dev(vgpu->vdev.mdev);
struct gvt_dma *this;
unsigned long g1;
int rc;
@@ -195,7 +198,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct device *dev = &vgpu->vdev.mdev->dev;
+ struct device *dev = mdev_dev(vgpu->vdev.mdev);
unsigned long gfn;
mutex_lock(&vgpu->vdev.cache_lock);
@@ -227,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
- char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
@@ -266,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
type->fence);
}
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *type_attrs[] = {
- &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
@@ -395,21 +398,24 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
+ int ret;
- pdev = mdev->parent->dev;
+ pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) {
gvt_err("failed to find type %s to create\n",
kobject_name(kobj));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
- gvt_err("create intel vgpu failed\n");
- return -EINVAL;
+ ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+ gvt_err("failed to create intel vgpu: %d\n", ret);
+ goto out;
}
INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -418,8 +424,11 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
- dev_name(&mdev->dev));
- return 0;
+ dev_name(mdev_dev(mdev)));
+ ret = 0;
+
+out:
+ return ret;
}
static int intel_vgpu_remove(struct mdev_device *mdev)
@@ -482,7 +491,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vgpu->vdev.iommu_notifier);
if (ret != 0) {
gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
@@ -490,17 +499,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
}
events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vgpu->vdev.group_notifier);
if (ret != 0) {
gvt_err("vfio_register_notifier for group failed: %d\n", ret);
goto undo_iommu;
}
- return kvmgt_guest_init(mdev);
+ ret = kvmgt_guest_init(mdev);
+ if (ret)
+ goto undo_group;
+
+ atomic_set(&vgpu->vdev.released, 0);
+ return ret;
+
+undo_group:
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
undo_iommu:
- vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
out:
return ret;
@@ -509,17 +527,26 @@ out:
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
struct kvmgt_guest_info *info;
+ int ret;
if (!handle_valid(vgpu->handle))
return;
- vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+ if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ return;
+
+ ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
- vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+ WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+ ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
&vgpu->vdev.group_notifier);
+ WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
+
+ vgpu->vdev.kvm = NULL;
vgpu->handle = 0;
}
@@ -534,6 +561,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
{
struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
vdev.release_work);
+
__intel_vgpu_release(vgpu);
}
@@ -1089,7 +1117,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return 0;
}
-static const struct parent_ops intel_vgpu_ops = {
+static const struct mdev_parent_ops intel_vgpu_ops = {
.supported_type_groups = intel_vgpu_type_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@@ -1134,6 +1162,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
spin_lock(&kvm->mmu_lock);
@@ -1164,6 +1196,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
spin_lock(&kvm->mmu_lock);
@@ -1311,18 +1347,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
- struct intel_vgpu *vgpu;
-
if (!info) {
gvt_err("kvmgt_guest_info invalid\n");
return false;
}
- vgpu = info->vgpu;
-
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvmgt_protect_table_destroy(info);
- gvt_cache_destroy(vgpu);
+ gvt_cache_destroy(info->vgpu);
vfree(info);
return true;
@@ -1372,7 +1404,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
return pfn;
pfn = INTEL_GVT_INVALID_ADDR;
- dev = &info->vgpu->vdev.mdev->dev;
+ dev = mdev_dev(info->vgpu->vdev.mdev);
rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450a1946..4df078bc5d04 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
- mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
- if (!mmio && !vgpu->mmio.disable_warn_untrack) {
- gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
- vgpu->id, offset, bytes, *(u32 *)p_data);
-
- if (offset == 0x206c) {
- gvt_err("------------------------------------------\n");
- gvt_err("vgpu%d: likely triggers a gfx reset\n",
- vgpu->id);
- gvt_err("------------------------------------------\n");
- vgpu->mmio.disable_warn_untrack = true;
- }
- }
-
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
- } else
+ } else {
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+ if (!vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+ gvt_err("vgpu%d: likely triggers a gfx reset\n",
+ vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+ }
+
if (ret)
goto err;
@@ -302,3 +303,56 @@ err:
mutex_unlock(&gvt->lock);
return ret;
}
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+ /* set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ intel_vgpu_reset_mmio(vgpu);
+
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
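
The mmio.c hunk splits MMIO handling into init/reset/clean: one allocation twice the MMIO size holds vreg followed by sreg, and a reset simply repopulates both halves from the firmware snapshot. A small userspace model of that layout is sketched below, assuming made-up sizes; it is not the driver code.

    /* Model of one allocation holding vreg and sreg back to back. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fake_vgpu_mmio {
        void *vreg;
        void *sreg;
    };

    static int init_mmio(struct fake_vgpu_mmio *mmio, size_t mmio_size,
                         const void *firmware_snapshot)
    {
        mmio->vreg = calloc(2, mmio_size);   /* models vzalloc(mmio_size * 2) */
        if (!mmio->vreg)
            return -1;
        mmio->sreg = (char *)mmio->vreg + mmio_size;

        /* "reset" = repopulate both halves from the firmware snapshot */
        memcpy(mmio->vreg, firmware_snapshot, mmio_size);
        memcpy(mmio->sreg, firmware_snapshot, mmio_size);
        return 0;
    }

    int main(void)
    {
        unsigned char snapshot[16] = { 0xab };
        struct fake_vgpu_mmio mmio;

        if (init_mmio(&mmio, sizeof(snapshot), snapshot))
            return 1;
        printf("vreg[0]=%#x sreg[0]=%#x\n",
               ((unsigned char *)mmio.vreg)[0], ((unsigned char *)mmio.sreg)[0]);
        free(mmio.vreg);
        return 0;
    }
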
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 87d5b5e366a3..3bc620f56f35 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
*offset; \
})
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2a0fbc896c3..d9fb41ab7119 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
vgpu->id))
return -EINVAL;
- vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
- GFP_DMA32 | __GFP_ZERO,
- INTEL_GVT_OPREGION_PORDER);
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+ __GFP_ZERO,
+ get_order(INTEL_GVT_OPREGION_SIZE));
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+ mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n");
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
- INTEL_GVT_OPREGION_PORDER);
+ get_order(INTEL_GVT_OPREGION_SIZE));
vgpu_opregion(vgpu)->va = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789d8f02..fbd023a16f18 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
#define INTEL_GVT_OPREGION_PARM 0x204
#define INTEL_GVT_OPREGION_PAGES 2
-#define INTEL_GVT_OPREGION_PORDER 1
-#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
+ struct intel_vgpu *vgpu;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
+ vgpu = workload->vgpu;
- if (!workload->status && !workload->vgpu->resetting) {
+ if (!workload->status && !vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
- intel_vgpu_trigger_virtual_event(workload->vgpu,
- event);
+ intel_vgpu_trigger_virtual_event(vgpu, event);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id] = NULL;
- atomic_dec(&workload->vgpu->running_workload_num);
-
list_del_init(&workload->list);
workload->complete(workload);
+ atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
@@ -459,11 +459,11 @@ complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
- complete_current_workload(gvt, ring_id);
-
if (workload->req)
i915_gem_request_put(fetch_and_zero(&workload->req));
+ complete_current_workload(gvt, ring_id);
+
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3b30c28bff51..2833dfa8c9ae 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
struct drm_i915_gem_object *obj;
void *va;
unsigned long len;
- void *bb_start_cmd_va;
+ u32 *bb_start_cmd_va;
};
#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9d5777..7295bc8e12fb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
- vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
-
- if (vgpu->mmio.vreg)
- memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
- else {
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
- if (!vgpu->mmio.vreg)
- return -ENOMEM;
- }
-
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
- memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
- vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
- /* set the bit 0:2(Core C-State ) to C0 */
- vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
- return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
- u16 *gmch_ctl;
- int i;
-
- memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
- info->cfg_space_size);
-
- if (!param->primary) {
- vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
- INTEL_GVT_PCI_CLASS_VGA_OTHER;
- vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
- INTEL_GVT_PCI_CLASS_VGA_OTHER;
- }
-
- /* Show guest that there isn't any stolen memory.*/
- gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
- *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
- intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
- gvt_aperture_pa_base(gvt), true);
-
- vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
- | PCI_COMMAND_MEMORY
- | PCI_COMMAND_MASTER);
- /*
- * Clear the bar upper 32bit and let guest to assign the new value
- */
- memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
- memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
- memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
- for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
- vgpu->cfg_space.bar[i].size = pci_resource_len(
- gvt->dev_priv->drm.pdev, i * 2);
- vgpu->cfg_space.bar[i].tracked = false;
- }
-}
-
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
if (low_avail / min_low == 0)
break;
gvt->types[i].low_gm_size = min_low;
- gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
gvt->types[i].fence = 4;
gvt->types[i].max_instance = low_avail / min_low;
gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
- clean_vgpu_mmio(vgpu);
+ intel_vgpu_clean_mmio(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
- setup_vgpu_cfg_space(vgpu, param);
+ intel_vgpu_init_cfg_space(vgpu, param->primary);
- ret = setup_vgpu_mmio(vgpu);
+ ret = intel_vgpu_init_mmio(vgpu);
if (ret)
- goto out_free_vgpu;
+ goto out_clean_idr;
ret = intel_vgpu_alloc_resource(vgpu, param);
if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
- clean_vgpu_mmio(vgpu);
+ intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}
/**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when user wants to reset a virtual GPU through
+ * device model reset or GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
+ * the whole vGPU to default state as when it is created. This vGPU function
+ * is required both for functionality and security concerns. The ultimate goal
+ * of vGPU FLR is to reuse a vGPU instance by virtual machines. When we
+ * assign a vGPU to a virtual machine we must issue such a reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
+ * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
+ * Unlike the FLR, GT reset only reset particular resource of a vGPU per
+ * the reset request. Guest driver can issue a GT reset by programming the
+ * virtual GDRST register to reset specific virtual GPU engine or all
+ * engines.
+ *
+ * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
+ * The parameter engine_mask specifies the engines that need to be
+ * reset. If ALL_ENGINES is given for engine_mask, it means the caller
+ * requests a full GT reset and we will reset all virtual GPU engines.
+ * For FLR, engine_mask is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ unsigned int engine_mask)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ gvt_dbg_core("------------------------------------------\n");
+ gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+ vgpu->id, dmlr, engine_mask);
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ /*
+ * The current_vgpu will set to NULL after stopping the
+ * scheduler when the reset is triggered by current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+ /* full GPU reset or device model level reset */
+ if (engine_mask == ALL_ENGINES || dmlr) {
+ intel_vgpu_reset_gtt(vgpu, dmlr);
+ intel_vgpu_reset_resource(vgpu);
+ intel_vgpu_reset_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+
+ if (dmlr)
+ intel_vgpu_reset_cfg_space(vgpu);
+ }
+
+ vgpu->resetting = false;
+ gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+ gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
* @vgpu: virtual GPU
*
* This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->lock);
+ intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+ mutex_unlock(&vgpu->gvt->lock);
}
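
intel_gvt_reset_vgpu() is now a thin wrapper that takes the gvt lock and calls intel_gvt_reset_vgpu_locked(), so callers that already hold the lock (such as gdrst_mmio_write()) can use the locked variant directly. The pthread-based sketch below models that locked/unlocked split; it is an illustration, not kernel code.

    /* Locked worker plus unlocked wrapper, modelled with a pthread mutex. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

    static void reset_vgpu_locked(int id, bool dmlr, unsigned int engine_mask)
    {
        /* caller must hold gvt_lock */
        printf("resetting vgpu%d, dmlr=%d, engines=%#x\n", id, dmlr, engine_mask);
    }

    static void reset_vgpu(int id)
    {
        pthread_mutex_lock(&gvt_lock);
        reset_vgpu_locked(id, true, 0);   /* full device-model level reset */
        pthread_mutex_unlock(&gvt_lock);
    }

    int main(void)
    {
        reset_vgpu(1);
        return 0;
    }
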
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 445fec9c2841..b2c4a0b8a627 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(dev_priv);
- if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
DRM_DEBUG_KMS("Device suspended\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 243224aeabf8..69bc3b0c4390 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
struct i915_frontbuffer_tracking fb_tracking;
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
u16 orig_clock;
bool mchbar_need_disable;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a31b7a891ec..4b23a7814713 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -244,14 +244,16 @@ err_phys:
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
+ struct sg_table *pages,
+ bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ if (needs_clflush &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
drm_clflush_sg(pages);
@@ -263,7 +265,7 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- __i915_gem_object_release_shmem(obj, pages);
+ __i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
@@ -593,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
- if (ret)
- return ret;
-
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
- unsigned long unwritten;
-
- /* The physical object once assigned is fixed for the lifetime
- * of the obj, so we can safely drop the lock and continue
- * to access vaddr.
- */
- mutex_unlock(&dev->struct_mutex);
- unwritten = copy_from_user(vaddr, user_data, args->size);
- mutex_lock(&dev->struct_mutex);
- if (unwritten) {
- ret = -EFAULT;
- goto out;
- }
- }
+ if (copy_from_user(vaddr, user_data, args->size))
+ return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(dev));
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
-out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
- return ret;
+ return 0;
}
void *i915_gem_object_alloc(struct drm_device *dev)
@@ -2231,7 +2207,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- __i915_gem_object_release_shmem(obj, pages);
+ __i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
@@ -2304,15 +2280,6 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
- return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
- return 0;
-#endif
-}
-
static void i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
@@ -2322,7 +2289,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
if (orig_st->nents == orig_st->orig_nents)
return;
- if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
return;
new_sg = new_st.sgl;
@@ -2360,7 +2327,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- max_segment = swiotlb_max_size();
+ max_segment = swiotlb_max_segment();
if (!max_segment)
max_segment = rounddown(UINT_MAX, PAGE_SIZE);
@@ -2728,6 +2695,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
struct intel_timeline *timeline;
+ unsigned long flags;
bool ring_hung;
if (engine->irq_seqno_barrier)
@@ -2763,13 +2731,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
+ timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock(&timeline->lock);
+
list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
- timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
list_for_each_entry(request, &timeline->requests, link)
reset_request(request);
+
+ spin_unlock(&timeline->lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814b015c..d534a316a16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -199,6 +199,7 @@ found:
}
/* Unbinding will emit any required flushes */
+ ret = 0;
while (!list_empty(&eviction_list)) {
vma = list_first_entry(&eviction_list,
struct i915_vma,
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e2b077df2da0..d229f47d1028 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
rcu_assign_pointer(active->request, request);
}
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active: the active tracker
+ * @fn: the routine called when the request is retired
+ * @mutex: struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+ i915_gem_retire_fn fn,
+ struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+ active->retire = fn ?: i915_gem_retire_noop;
+}
+
static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb902b5..e924a9516079 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return ret;
}
+ trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 86ecec5601d4..588470eb8d39 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
+ bool ret = false;
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
*/
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
- return true;
+ ret = true;
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
-
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
- return false;
+ return ret;
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6daad8613760..f0b9aa7a0483 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2251,6 +2251,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
+ if (WARN_ON_ONCE(!vma))
+ return;
+
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
}
@@ -2585,8 +2588,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* We only keep the x/y offsets, so push all of the
* gtt offset into the x/y offsets.
*/
- _intel_adjust_tile_offset(&x, &y, tile_size,
- tile_width, tile_height, pitch_tiles,
+ _intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
gtt_offset_rotated * tile_size, 0);
gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2967,6 +2971,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (!plane_state->base.visible)
+ return 0;
+
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
@@ -6846,6 +6853,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
}
state = drm_atomic_state_alloc(crtc->dev);
+ if (!state) {
+ DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.id, crtc->name);
+ return;
+ }
+
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
/* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11256,7 @@ found:
}
old->restore_state = restore_state;
+ drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14512,8 +14526,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
break;
case FENCE_FREE:
- drm_atomic_state_put(&state->base);
- break;
+ {
+ struct intel_atomic_helper *helper =
+ &to_i915(state->base.dev)->atomic_helper;
+
+ if (llist_add(&state->freed, &helper->free_list))
+ schedule_work(&helper->free_work);
+ break;
+ }
}
return NOTIFY_DONE;
@@ -16392,6 +16412,18 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+ struct intel_atomic_state *state, *next;
+ struct llist_node *freed;
+
+ freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ llist_for_each_entry_safe(state, next, freed, freed)
+ drm_atomic_state_put(&state->base);
+}
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16443,9 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &intel_mode_funcs;
+ INIT_WORK(&dev_priv->atomic_helper.free_work,
+ intel_atomic_helper_free_state);
+
intel_init_quirks(dev);
intel_init_pm(dev_priv);
@@ -16791,7 +16826,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state = crtc->config;
- int pixclk = 0;
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16803,23 +16837,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->base.enabled = crtc_state->base.enable;
crtc->active = crtc_state->base.active;
- if (crtc_state->base.active) {
+ if (crtc_state->base.active)
dev_priv->active_crtcs |= 1 << crtc->pipe;
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- pixclk = ilk_pipe_pixel_rate(crtc_state);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc_state->base.adjusted_mode.crtc_clock;
- else
- WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
- }
-
- dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
readout_plane_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16892,6 +16912,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ int pixclk = 0;
+
crtc->base.hwmode = crtc->config->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16919,10 +16941,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ pixclk = ilk_pipe_pixel_rate(crtc->config);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+ else
+ WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
+ dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
intel_pipe_config_sanity_check(dev_priv, crtc->config);
}
}
@@ -17024,7 +17059,8 @@ void intel_display_resume(struct drm_device *dev)
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_put(state);
+ if (state)
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -17094,6 +17130,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_work(&dev_priv->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
intel_disable_gt_powersave(dev_priv);
/*
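
The intel_display.c changes defer freeing atomic states from the fence callback, which must not sleep, onto atomic_helper.free_list and drain that llist later from atomic_helper.free_work. The userspace sketch below models the same push-from-atomic-context / drain-from-worker pattern with C11 atomics; the names are illustrative and it is not the driver code.

    /* Lock-free deferred-free list: push from a no-sleep context, drain later. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct deferred_state {
        struct deferred_state *next;
        int id;
    };

    static _Atomic(struct deferred_state *) free_list;

    /* Push from a context that must not sleep (models llist_add()). */
    static int defer_free(struct deferred_state *s)
    {
        struct deferred_state *old = atomic_load(&free_list);

        do {
            s->next = old;
        } while (!atomic_compare_exchange_weak(&free_list, &old, s));

        return old == NULL;   /* list was empty: caller should kick the worker */
    }

    /* Drain from process context (models the free_work handler). */
    static void free_worker(void)
    {
        struct deferred_state *s = atomic_exchange(&free_list, NULL);

        while (s) {
            struct deferred_state *next = s->next;

            printf("freeing state %d\n", s->id);
            free(s);
            s = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct deferred_state *s = malloc(sizeof(*s));

            s->id = i;
            if (defer_free(s))
                printf("would schedule_work() here\n");
        }
        free_worker();
        return 0;
    }
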
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d9bc19be855e..0b8e8eb85c19 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp);
+ struct intel_dp *intel_dp,
+ bool force_disable_vdd);
static void
intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
/*
* Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
return 0;
}
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
port_name(port), pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp)
+ struct intel_dp *intel_dp,
+ bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * edp_panel_vdd_on() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ironlake_get_pp_control(intel_dp);
+
+ WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ I915_WRITE(regs.pp_ctrl, pp);
+ }
+
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd132c216a67..cd72ae171eeb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -370,6 +370,8 @@ struct intel_atomic_state {
struct skl_wm_values wm_results;
struct i915_sw_fence commit_ready;
+
+ struct llist_node freed;
};
struct intel_plane_state {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index beb08982dc0b..8cf2d80f2254 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ if (!ifbdev)
+ return;
+
ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa20c73..beabc17e7c8a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *batch,
uint32_t index)
{
- struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
- /*
- * WaDisableLSQCROPERFforOCL:kbl
- * This WA is implemented in skl_init_clock_gating() but since
- * this batch updates GEN8_L3SQCREG4 with default value we need to
- * set this bit here to retain the WA during flush.
- */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
- l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd0e4dac7cc1..e589e17876dc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
- overlay->last_flip.retire = retire;
+ i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+ &overlay->i915->drm.struct_mutex);
i915_gem_active_set(&overlay->last_flip, req);
i915_add_request(req);
}
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vma->obj, new_bo,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+ vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
+ init_request_active(&overlay->last_flip, NULL);
+
regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_unpin_bo;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aeb637dc1fdf..91cb4c422ad5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE);
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:kbl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
-
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 4942ca090b46..7890e30eb584 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
struct drm_rect clip = { 0, };
+ if (!state->crtc)
+ return 0;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index d836b2274531..f7c870172220 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -38,6 +38,11 @@
* - TV Panel encoding via ENCT
*/
+/* HHI Registers */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
+
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
.mode_tag = MESON_VENC_MODE_CVBS_PAL,
.hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
+ /* Disable CVBS VDAC */
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+
+ /* Power Down Dacs */
+ writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+ /* Disable HDMI PHY */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+
+ /* Disable HDMI */
+ writel_bits_relaxed(0x3, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
/* Disable all encoders */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index c809c085fd78..a2bcc70a03ef 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
/* Disable CVBS VDAC */
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
}
static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a18126150e11..686a580c711a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr;
+
+ /*
+ * Mask wptr value that we calculate to fit in the HW range. This is
+ * to account for the possibility that the last command fit exactly into
+ * the ringbuffer and rb->next hasn't wrapped to zero yet
+ */
+ wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct msm_mmu *mmu;
int ret;
adreno_gpu->funcs = funcs;
@@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->aspace->mmu;
- if (mmu) {
+ if (gpu->aspace && gpu->aspace->mmu) {
+ struct msm_mmu *mmu = gpu->aspace->mmu;
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
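Editor's note: the adreno_flush() hunk above masks the computed write pointer with (size / 4 - 1) so a command that exactly fills the ring wraps back to slot 0 instead of producing an out-of-range wptr; the msm_ringbuffer.c hunk further down enforces the matching precondition that the ring size is a power of two. A minimal standalone sketch of that arithmetic (helper names here are invented for illustration, not the driver's API):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when x is a non-zero power of two, the precondition for masking. */
static bool is_power_of_2(uint32_t x)
{
	return x && !(x & (x - 1));
}

/* Wrap a dword index into a ring of 'size_bytes' bytes (must be a power of 2). */
static uint32_t ring_wrap_dwords(uint32_t next_dwords, uint32_t size_bytes)
{
	assert(is_power_of_2(size_bytes));
	return next_dwords & ((size_bytes / 4) - 1);
}

int main(void)
{
	uint32_t size = 4096;            /* ring size in bytes */
	uint32_t ndwords = size / 4;     /* 1024 dword slots */

	/* A command ending exactly at the ring's end yields index 1024, which
	 * the mask folds back to 0 instead of writing past the hardware range. */
	printf("wptr = %u\n", (unsigned)ring_wrap_dwords(ndwords, size));     /* 0 */
	printf("wptr = %u\n", (unsigned)ring_wrap_dwords(ndwords - 1, size)); /* 1023 */
	return 0;
}
```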
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5f6cd8745dbc..c396d459a9d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
- int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
-
- for_each_plane_in_state(state, plane, plane_state, i)
- mdp5_plane_complete_commit(plane, plane_state);
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17b0cc101171..cdfc63d90c7b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
-
- bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
- struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index c099da7bc212..25d9d0a97156 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tzpos=%u\n", pstate->zpos);
drm_printf(p, "\talpha=%u\n", pstate->alpha);
drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
- drm_printf(p, "\tpending=%u\n", pstate->pending);
}
static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
- mdp5_state->pending = false;
-
return &mdp5_state->base;
}
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
- /* We don't allow faster-than-vblank updates.. if we did add this
- * some day, we would need to disallow in cases where hwpipe
- * changes
- */
- if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
- return -EBUSY;
-
max_width = config->hw->lm.max_width << 16;
max_height = config->hw->lm.max_height << 16;
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
- struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
DBG("%s: update", plane->name);
- mdp5_state->pending = true;
-
if (plane_enabled(state)) {
int ret;
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return pstate->hwpipe->flush_mask;
}
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
- pstate->pending = false;
-}
-
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8bc59c7e261..8098677a3916 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ if (!priv->aspace[id])
+ continue;
msm_gem_unmap_vma(priv->aspace[id],
&msm_obj->domain[id], msm_obj->sgt);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 166e84e4f0d4..489676568a10 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
- int ret;
+ int ret = 0;
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
if (ret)
- return -EFAULT;
+ goto out;
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
if (ret)
- return ret;
+ goto out;
if (valid)
continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off;
}
+out:
msm_gem_put_vaddr_locked(&obj->base);
- return 0;
+ return ret;
}
static void submit_cleanup(struct msm_gem_submit *submit)
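Editor's note: the submit_reloc() hunk above converts several early returns into jumps to a single `out:` label so msm_gem_put_vaddr_locked() runs on every path. A generic, self-contained sketch of that single-exit cleanup pattern (the resource and helpers below are made up for the example):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for "map a buffer" / "unmap it" so the example compiles. */
static void *map_buffer(size_t n)  { return calloc(1, n); }
static void  unmap_buffer(void *p) { free(p); }

static int process(size_t len, size_t offset)
{
	void *ptr;
	int ret = 0;                 /* initialised so the success path just falls through */

	ptr = map_buffer(len);
	if (!ptr)
		return -ENOMEM;      /* nothing acquired yet, a plain return is fine */

	if (offset % 4) {
		ret = -EINVAL;       /* error after acquiring: jump, don't return */
		goto out;
	}
	if (offset >= len) {
		ret = -EINVAL;
		goto out;
	}
	/* ... real work on ptr ... */
out:
	unmap_buffer(ptr);           /* single cleanup point on every path */
	return ret;
}

int main(void)
{
	printf("%d %d\n", process(64, 8), process(64, 6)); /* 0 -22 */
	return 0;
}
```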
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index f326cf6a32e6..67b34e069abf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
struct msm_ringbuffer *ring;
int ret;
- size = ALIGN(size, 4); /* size should be dword aligned */
+ if (WARN_ON(!is_power_of_2(size)))
+ return ERR_PTR(-EINVAL);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cef08da1da4e..6a157763dfc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
+ if (!dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 59348fc41c77..bc85a45f91cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
+
+ if (!drm_dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(drm_dev);
+
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5bfdacb..42c1fa53d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@ struct nouveau_drm {
struct backlight_device *backlight;
struct list_head bl_connectors;
struct work_struct hpd_work;
+ struct work_struct fbcon_work;
+ int fbcon_new_state;
#ifdef CONFIG_ACPI
struct notifier_block acpi_nb;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2f2a3dcd4ad7..fa2d0a978ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+ int state = READ_ONCE(drm->fbcon_new_state);
+
+ if (state == FBINFO_STATE_RUNNING)
+ pm_runtime_get_sync(drm->dev->dev);
+
+ console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(drm->dev);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(drm->dev);
+ console_unlock();
+
+ if (state == FBINFO_STATE_RUNNING) {
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+ }
+}
+
void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- console_lock();
- if (state == FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_restore(dev);
- drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
- if (state != FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
+
+ if (!drm->fbcon)
+ return;
+
+ drm->fbcon_new_state = state;
+ /* Since runtime resume can happen as a result of a sysfs operation,
+ * it's possible we already have the console locked. So handle fbcon
+ * init/deinit from a separate work thread
+ */
+ schedule_work(&drm->fbcon_work);
}
int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
return -ENOMEM;
drm->fbcon = fbcon;
+ INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
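Editor's note: the fbcon suspend path can be entered while the console lock is already held (runtime resume triggered from sysfs), so the patch above stashes the requested state and lets a work item take the console lock later. A hedged kernel-style sketch of that "record the latest state, process it from a worker" shape; the structure, field names and handle_state() below are invented for illustration and are not the driver's API:

```c
/* Sketch only: deferring lock-heavy work to a workqueue. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct deferred_work;
	int new_state;                    /* last state requested by callers */
};

static void handle_state(struct my_dev *dev, int state)
{
	/* placeholder for the console_lock()-protected work */
	(void)dev;
	(void)state;
}

static void my_dev_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, deferred_work);
	int state = READ_ONCE(dev->new_state);

	/* Process context, not nested inside the caller's locks, so it is
	 * safe to take heavyweight locks here. */
	handle_state(dev, state);
}

static void my_dev_request_state(struct my_dev *dev, int state)
{
	dev->new_state = state;           /* remember only the latest request */
	schedule_work(&dev->deferred_work);
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->deferred_work, my_dev_work_fn);
}
```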
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 00ea0002b539..e0c143b865f3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -366,11 +366,10 @@ static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
/* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown.
- * unfortunately we can't detect certain
- * hypervisors so just do this all the time.
+ * is torn down properly on reboot/shutdown
*/
- radeon_pci_remove(pdev);
+ if (radeon_device_is_virtual())
+ radeon_pci_remove(pdev);
}
static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ad4d7b8b8322..414776811e71 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev)
int err;
int new_fw = 0;
bool new_smc = false;
+ bool si58_fw = false;
+ bool banks2_fw = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- /* XXX: figure out which Tahitis need the new ucode */
- if (0)
- new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->device == 0x6810) ||
- (rdev->pdev->device == 0x6811) ||
- (rdev->pdev->device == 0x6816) ||
- (rdev->pdev->device == 0x6817) ||
- (rdev->pdev->device == 0x6806))
+ if ((rdev->pdev->revision == 0x81) &&
+ ((rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811)))
new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B))
+ if (((rdev->pdev->device == 0x6820) &&
+ ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83))) ||
+ ((rdev->pdev->device == 0x6821) &&
+ ((rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87))) ||
+ ((rdev->pdev->revision == 0x87) &&
+ ((rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682b))))
new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
- if ((rdev->pdev->revision == 0xC7) ||
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605))
+ if (((rdev->pdev->revision == 0x81) &&
+ ((rdev->pdev->device == 0x6600) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605) ||
+ (rdev->pdev->device == 0x6610))) ||
+ ((rdev->pdev->revision == 0x83) &&
+ (rdev->pdev->device == 0x6610)))
new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0xC3) ||
- (rdev->pdev->device == 0x6664) ||
- (rdev->pdev->device == 0x6665) ||
- (rdev->pdev->device == 0x6667))
+ if (((rdev->pdev->revision == 0x81) &&
+ (rdev->pdev->device == 0x6660)) ||
+ ((rdev->pdev->revision == 0x83) &&
+ ((rdev->pdev->device == 0x6660) ||
+ (rdev->pdev->device == 0x6663) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667))))
new_smc = true;
+ else if ((rdev->pdev->revision == 0xc3) &&
+ (rdev->pdev->device == 0x6665))
+ banks2_fw = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
+ /* this memory configuration requires special firmware */
+ if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ si58_fw = true;
+
DRM_INFO("Loading %s Microcode\n", new_chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+ if (si58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- if (new_smc)
+ if (banks2_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+ else if (new_smc)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
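Editor's note: the si.c hunks above key special microcode off explicit flags (si58_fw from the MC_SEQ_MISC0 memory-configuration read, banks2_fw and new_smc from PCI device/revision checks) rather than broad device lists. A small standalone sketch of the resulting selection ladder; the helper and its arguments are simplified stand-ins, not the driver's code:

```c
#include <stdbool.h>
#include <stdio.h>

/* Pick the SMC firmware file the way the ladder in si_init_microcode() does:
 * the most specific flag wins, then the generic per-chip name. */
static void pick_smc_firmware(char *buf, size_t len, const char *chip,
			      bool banks2_fw, bool new_smc)
{
	if (banks2_fw)
		snprintf(buf, len, "radeon/banks_k_2_smc.bin");
	else if (new_smc)
		snprintf(buf, len, "radeon/%s_k_smc.bin", chip);
	else
		snprintf(buf, len, "radeon/%s_smc.bin", chip);
}

int main(void)
{
	char name[64];

	pick_smc_firmware(name, sizeof(name), "hainan", true, false);
	printf("%s\n", name);   /* radeon/banks_k_2_smc.bin */
	pick_smc_firmware(name, sizeof(name), "verde", false, true);
	printf("%s\n", name);   /* radeon/verde_k_smc.bin */
	return 0;
}
```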
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8b5e697f2549..2944916f7102 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,30 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (rdev->family == CHIP_VERDE) {
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- } else if (rdev->family == CHIP_OLAND) {
- if ((rdev->pdev->revision == 0xC7) ||
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
@@ -3040,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6665) ||
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
- max_mclk = 80000;
}
}
/* Apply dpm quirks */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 725dffad5640..6dfdb145f3bb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- uint32_t stat;
+ uint32_t stat, reg;
stat = tilcdc_read_irqstatus(dev);
tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
__func__, stat);
tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ >
- SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
- queue_work(system_wq, &tilcdc_crtc->recover_work);
- if (priv->rev == 1)
+ if (priv->rev == 1) {
+ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
+ if (reg & LCDC_RASTER_ENABLE) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
- LCDC_V1_SYNC_LOST_INT_ENA);
- else
+ LCDC_RASTER_ENABLE);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_RASTER_ENABLE);
+ }
+ } else {
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev,
+ "%s(0x%08x): Sync lost flood detected, recovering",
+ __func__, stat);
+ queue_work(system_wq,
+ &tilcdc_crtc->recover_work);
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
- tilcdc_crtc->sync_lost_count = 0;
+ tilcdc_crtc->sync_lost_count = 0;
+ }
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index a0fd3e66bc4b..7aadce1f7e7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
}
- __drm_atomic_helper_crtc_destroy_state(state);
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
}
static const struct drm_crtc_funcs vc4_crtc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index db920771bfb5..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
args->shader_rec_count);
struct vc4_bo *bo;
- if (uniforms_offset < shader_rec_offset ||
+ if (shader_rec_offset < args->bin_cl_size ||
+ uniforms_offset < shader_rec_offset ||
exec_size < uniforms_offset ||
args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) {
DRM_ERROR("overflow in exec arguments\n");
+ ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 08886a309757..5cdd003605f5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
}
ret = vc4_full_res_bounds_check(exec, *obj, surf);
- if (!ret)
+ if (ret)
return ret;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index dd21f950e129..cde9f3758106 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
info->fbops = &virtio_gpufb_ops;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->screen_base = obj->vmap;
+ info->screen_buffer = obj->vmap;
info->screen_size = obj->gem_base.size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index d40ed9fdf68d..70b12f89a193 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -64,7 +64,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
#define QUIRK_IS_MULTITOUCH BIT(3)
-#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT
+#define KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
+ QUIRK_NO_INIT_REPORTS)
#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
QUIRK_SKIP_INPUT_MAPPING | \
QUIRK_IS_MULTITOUCH)
@@ -170,11 +171,11 @@ static int asus_raw_event(struct hid_device *hdev,
static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
+ struct input_dev *input = hi->input;
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
int ret;
- struct input_dev *input = hi->input;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
@@ -191,10 +192,10 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
return ret;
}
-
- drvdata->input = input;
}
+ drvdata->input = input;
+
return 0;
}
@@ -286,7 +287,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop_hw;
}
- drvdata->input->name = "Asus TouchPad";
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ drvdata->input->name = "Asus TouchPad";
+ } else {
+ drvdata->input->name = "Asus Keyboard";
+ }
if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
ret = asus_start_multitouch(hdev);
@@ -315,7 +320,7 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id asus_devices[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+ USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), KEYBOARD_QUIRKS},
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
{ }
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index cff060b56da9..ea36b557d5ee 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 717704e9ae07..c0303f61c26a 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
- char data[8];
+ char *data;
+
+ data = kmalloc(8, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 5) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return brightness;
+ ret = brightness;
+out:
+ kfree(data);
+
+ return ret;
}
static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
const char *macro_mode;
- char data[8];
+ char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_GET_MODE,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 2,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 1) {
dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
default:
dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
data[0]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+ kfree(data);
+
+ return ret;
}
static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int current_profile;
- char data[8];
+ char *data;
+
+ data = kmalloc(8, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 8) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
current_profile = data[7];
if (current_profile < 1 || current_profile > 3) {
dev_warn(dev, "Read invalid current profile: %02hhx.\n",
data[7]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+ kfree(data);
+
+ return ret;
}
static ssize_t k90_store_current_profile(struct device *dev,
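Editor's note: the hid-corsair hunks above move the transfer buffers from the stack to kmalloc() (USB control transfers need DMA-capable memory, which an on-stack array is not guaranteed to be) and treat a short read as an error before indexing into the data. A hedged kernel-style sketch of that pattern; the request constant and surrounding context are placeholders, not the driver's definitions:

```c
/* Sketch of the "heap buffer + short-read check + single kfree" pattern. */
#include <linux/slab.h>
#include <linux/usb.h>

#define MY_REQUEST 0x01   /* placeholder vendor request */

static int read_status_byte(struct usb_device *usbdev)
{
	char *data;
	int ret;

	data = kmalloc(8, GFP_KERNEL);   /* DMA-capable, unlike a stack array */
	if (!data)
		return -ENOMEM;

	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), MY_REQUEST,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, data, 8, USB_CTRL_SET_TIMEOUT);
	if (ret < 5) {                   /* short read: byte 4 would be garbage */
		ret = -EIO;
		goto out;
	}

	ret = data[4];                   /* the value we actually wanted */
out:
	kfree(data);                     /* one exit path frees the buffer */
	return ret;
}
```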
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 1b764d1745f3..1689568b597d 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
return rdesc;
+ if (*rsize < 4)
+ return rdesc;
+
for (i = 0; i < *rsize - 4; i++)
if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
rdesc[i] = 0x19;
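Editor's note: cp_report_fixup() iterates with `i < *rsize - 4`; when *rsize is an unsigned value smaller than 4 the subtraction wraps to a huge number and the loop walks far past the descriptor, which is exactly what the added `if (*rsize < 4) return rdesc;` guard prevents. A tiny standalone demonstration of the wrap:

```c
#include <stdio.h>

int main(void)
{
	unsigned int rsize = 2;

	/* Unsigned subtraction wraps instead of going negative, so a loop
	 * bounded by "i < rsize - 4" would run ~4 billion iterations. */
	printf("rsize - 4 = %u\n", rsize - 4);   /* 4294967294 */

	if (rsize < 4)                           /* the guard added by the patch */
		printf("descriptor too short, skip fixup\n");
	return 0;
}
```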
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ec277b96eaa1..f46f2c5117fa 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -319,6 +319,7 @@
#define USB_VENDOR_ID_DRAGONRISE 0x0079
#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
@@ -365,6 +366,9 @@
#define USB_VENDOR_ID_FLATFROG 0x25b5
#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
+#define USB_VENDOR_ID_FUTABA 0x0547
+#define USB_DEVICE_ID_LED_DISPLAY 0x7000
+
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
@@ -812,6 +816,9 @@
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
+#define USB_VENDOR_ID_PETZL 0x2122
+#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234
+
#define USB_VENDOR_ID_PHILIPS 0x0471
#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 5c925228847c..4ef73374a8f9 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
__s32 value;
int ret = 0;
- memset(buffer, 0, buffer_size);
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
if (!report || (field_index >= report->maxfield)) {
@@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
int buffer_index = 0;
int i;
+ memset(buffer, 0, buffer_size);
+
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
if (!report || (field_index >= report->maxfield) ||
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7687c0875395..f405b07d0381 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1099,8 +1099,11 @@ struct sony_sc {
u8 led_delay_on[MAX_LEDS];
u8 led_delay_off[MAX_LEDS];
u8 led_count;
+ bool ds4_dongle_connected;
};
+static void sony_set_leds(struct sony_sc *sc);
+
static inline void sony_schedule_work(struct sony_sc *sc)
{
if (!sc->defer_initialization)
@@ -1430,6 +1433,31 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
return -EILSEQ;
}
}
+
+ /*
+ * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
+ * if a DS4 is actually connected (indicated by '0').
+ * For non-dongle, this bit is always 0 (connected).
+ */
+ if (sc->hdev->vendor == USB_VENDOR_ID_SONY &&
+ sc->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
+ bool connected = (rd[31] & 0x04) ? false : true;
+
+ if (!sc->ds4_dongle_connected && connected) {
+ hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
+ sony_set_leds(sc);
+ sc->ds4_dongle_connected = true;
+ } else if (sc->ds4_dongle_connected && !connected) {
+ hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
+ sc->ds4_dongle_connected = false;
+ /* Return 0, so hidraw can get the report. */
+ return 0;
+ } else if (!sc->ds4_dongle_connected) {
+ /* Return 0, so hidraw can get the report. */
+ return 0;
+ }
+ }
+
dualshock4_parse_report(sc, rd, size);
}
@@ -2390,6 +2418,12 @@ static int sony_check_add(struct sony_sc *sc)
}
memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
+
+ snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ sc->mac_address[5], sc->mac_address[4],
+ sc->mac_address[3], sc->mac_address[2],
+ sc->mac_address[1], sc->mac_address[0]);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2548,7 +2582,7 @@ static int sony_input_configured(struct hid_device *hdev,
hid_err(sc->hdev,
"Unable to initialize multi-touch slots: %d\n",
ret);
- return ret;
+ goto err_stop;
}
sony_init_output_report(sc, dualshock4_send_output_report);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 78fb32a7b103..ea3c3546cef7 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
goto out_unlock;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b3e01c82af05..e9d6cc7cdfc5 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -83,11 +83,13 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b9779bcbd140..8aeca038cc73 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
+
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
-
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1a9a3ca6d56..0884dc9554fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
wacom_report_events(hdev, report);
+ /*
+ * Non-input reports may be sent prior to the device being
+ * completely initialized. Since only their events need
+ * to be processed, exit after 'wacom_report_events' has
+ * been called to prevent potential crashes in the report-
+ * processing functions.
+ */
+ if (report->type != HID_INPUT_REPORT)
+ return;
+
if (WACOM_PAD_FIELD(field)) {
wacom_wac_pad_battery_report(hdev, report);
if (wacom->wacom_wac.pad_input)
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 322ed9272811..841f2428e84a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1036,7 +1036,7 @@ static const u8 lm90_temp_emerg_index[3] = {
};
static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
-static const u8 lm90_max_alarm_bits[3] = { 0, 4, 12 };
+static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 686971263bef..45d6771fac8c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
- ret = i2c_add_adapter(&id->adap);
- if (ret < 0)
- goto err_clk_dis;
-
/*
* Cadence I2C controller has a bug wherein it generates
* invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
*/
cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ ret = i2c_add_adapter(&id->adap);
+ if (ret < 0)
+ goto err_clk_dis;
+
dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index c62b7cd475f8..3310f2e0dbd3 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int lpi2c_imx_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int lpi2c_imx_resume(struct device *dev)
+{
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
+
static struct platform_driver lpi2c_imx_driver = {
.probe = lpi2c_imx_probe,
.remove = lpi2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = lpi2c_imx_of_match,
+ .pm = &imx_lpi2c_pm,
},
};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c2268cdf38e8..e34d82e79b98 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 command, int size, union i2c_smbus_data *data)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
+ unsigned short piix4_smba = adapdata->smba;
+ int retries = MAX_TIMEOUT;
+ int smbslvcnt;
u8 smba_en_lo;
u8 port;
int retval;
+ /* Request the SMBUS semaphore, avoid conflicts with the IMC */
+ smbslvcnt = inb_p(SMBSLVCNT);
+ do {
+ outb_p(smbslvcnt | 0x10, SMBSLVCNT);
+
+ /* Check the semaphore status */
+ smbslvcnt = inb_p(SMBSLVCNT);
+ if (smbslvcnt & 0x10)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--retries);
+ /* SMBus is still owned by the IMC, we give up */
+ if (!retries)
+ return -EBUSY;
+
mutex_lock(&piix4_mutex_sb800);
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
@@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
mutex_unlock(&piix4_mutex_sb800);
+ /* Release the semaphore */
+ outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+
return retval;
}
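Editor's note: the piix4 hunk above takes a hardware semaphore shared with the IMC before touching the SMBus: set the request bit, read the status bit back, sleep and retry a bounded number of times, and give up with -EBUSY if the other agent never releases it. A standalone sketch of that bounded-retry shape; the register behaviour is faked here so the example runs anywhere:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Fake "hardware": pretend the semaphore becomes ours on the third poll. */
static int polls;
static void request_semaphore(void) { polls++; }
static bool semaphore_granted(void) { return polls >= 3; }

static int acquire_semaphore(void)
{
	int retries = 100;                  /* bounded, like MAX_TIMEOUT */

	do {
		request_semaphore();        /* set the request bit */
		if (semaphore_granted())    /* read back the status bit */
			return 0;

		/* back off before retrying (usleep_range() in the kernel) */
		struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
		nanosleep(&delay, NULL);
	} while (--retries);

	return -EBUSY;                      /* still owned by the other agent */
}

int main(void)
{
	printf("acquire: %d after %d polls\n", acquire_semaphore(), polls);
	return 0;
}
```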
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index cf9e396d7702..583e95042a21 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev)
if (!client->irq) {
int irq = -ENOENT;
- if (dev->of_node) {
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
+ dev_dbg(dev, "Using Host Notify IRQ\n");
+ irq = i2c_smbus_host_notify_to_irq(client);
+ } else if (dev->of_node) {
irq = of_irq_get_byname(dev->of_node, "irq");
if (irq == -EINVAL || irq == -ENODATA)
irq = of_irq_get(dev->of_node, 0);
@@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev)
}
if (irq == -EPROBE_DEFER)
return irq;
- /*
- * ACPI and OF did not find any useful IRQ, try to see
- * if Host Notify can be used.
- */
- if (irq < 0) {
- dev_dbg(dev, "Using Host Notify IRQ\n");
- irq = i2c_smbus_host_notify_to_irq(client);
- }
+
if (irq < 0)
irq = 0;
@@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
if (i2c_check_addr_validity(addr, info.flags)) {
dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
- info.addr, node->full_name);
+ addr, node->full_name);
return ERR_PTR(-EINVAL);
}
@@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
info.of_node = of_node_get(node);
info.archdata = &dev_ad;
+ if (of_property_read_bool(node, "host-notify"))
+ info.flags |= I2C_CLIENT_HOST_NOTIFY;
+
if (of_get_property(node, "wakeup-source", NULL))
info.flags |= I2C_CLIENT_WAKE;
@@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
int ret;
if (!client || !slave_cb) {
- WARN(1, "insufficent data\n");
+ WARN(1, "insufficient data\n");
return -EINVAL;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 66f323fd3982..6f638bbc922d 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
- union i2c_smbus_data temp;
+ union i2c_smbus_data temp = {};
int datasize, res;
if (copy_from_user(&data_arg,
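Editor's note: i2cdev_ioctl_smbus() copies `temp` back to user space for read transactions, so initialising it with `= {}` guarantees that bytes the transfer never fills cannot leak uninitialised kernel stack contents. A minimal standalone illustration of the idea; the union layout is a simplified stand-in and memset() is used here for the same zero-fill effect:

```c
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for union i2c_smbus_data. */
union smbus_data {
	unsigned char byte;
	unsigned short word;
	unsigned char block[34];
};

int main(void)
{
	union smbus_data temp;

	memset(&temp, 0, sizeof(temp)); /* the patch gets this effect with "= {}" */
	temp.byte = 0x5a;               /* a byte-sized reply fills only one byte */

	/* Copying the whole union out afterwards cannot expose stale bytes:
	 * everything past the reply is still zero. */
	printf("block[1..3] = %u %u %u\n",
	       temp.block[1], temp.block[2], temp.block[3]);
	return 0;
}
```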
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index f6b6d42385e1..784670e2736b 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -353,12 +353,12 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
.value = 0x00,
- .gain = IIO_G_TO_M_S_2(1024),
+ .gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_6G,
.value = 0x01,
- .gain = IIO_G_TO_M_S_2(340),
+ .gain = IIO_G_TO_M_S_2(3000),
},
},
},
@@ -366,6 +366,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = 0x21,
.mask = 0x40,
},
+ /*
+ * Data Alignment Setting - needs to be set to get
+ * left-justified data like all other sensors.
+ */
+ .das = {
+ .addr = 0x21,
+ .mask = 0x01,
+ },
.drdy_irq = {
.addr = 0x21,
.mask_int1 = 0x04,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 38bc319904c4..9c8b558ba19e 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -561,7 +561,7 @@ config TI_ADS8688
config TI_AM335X_ADC
tristate "TI's AM335X ADC driver"
- depends on MFD_TI_AM335X_TSCADC
+ depends on MFD_TI_AM335X_TSCADC && HAS_DMA
select IIO_BUFFER
select IIO_KFIFO_BUF
help
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index fe7775bb3740..df4045203a07 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -30,7 +30,9 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
const struct iio_chan_spec *channel = &indio_dev->channels[i];
- unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+ unsigned int bytes_to_read =
+ DIV_ROUND_UP(channel->scan_type.realbits +
+ channel->scan_type.shift, 8);
unsigned int storage_bytes =
channel->scan_type.storagebits >> 3;
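Editor's note: channels whose sample carries a non-zero shift need more bus bytes than realbits/8 alone suggests, so the read length above becomes DIV_ROUND_UP(realbits + shift, 8). A quick arithmetic check as a standalone snippet:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 12 significant bits left-shifted by 4 occupy two full bytes,
	 * which the old realbits >> 3 (= 1) would under-read. */
	unsigned int realbits = 12, shift = 4;

	printf("old: %u byte(s)\n", realbits >> 3);                     /* 1 */
	printf("new: %u byte(s)\n", DIV_ROUND_UP(realbits + shift, 8)); /* 2 */
	return 0;
}
```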
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 975a1f19f747..79c8c7cd70d5 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -401,6 +401,15 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
return err;
}
+ /* set DAS */
+ if (sdata->sensor_settings->das.addr) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->das.addr,
+ sdata->sensor_settings->das.mask, 1);
+ if (err < 0)
+ return err;
+ }
+
if (sdata->int_pin_open_drain) {
dev_info(&indio_dev->dev,
"set interrupt line to open drain mode\n");
@@ -483,8 +492,10 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
int err;
u8 *outdata;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
+ unsigned int byte_for_channel;
+ byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits +
+ ch->scan_type.shift, 8);
outdata = kmalloc(byte_for_channel, GFP_KERNEL);
if (!outdata)
return -ENOMEM;
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index 2d2ee353dde7..a5913e97945e 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -153,7 +153,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
ior_cfg = val | priv->preset_enable[chan->channel] << 1;
/* Load I/O control configuration */
- outb(0x40 | ior_cfg, base_offset);
+ outb(0x40 | ior_cfg, base_offset + 1);
return 0;
case IIO_CHAN_INFO_SCALE:
@@ -233,7 +233,7 @@ static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
const struct quad8_iio *const priv = iio_priv(indio_dev);
return snprintf(buf, PAGE_SIZE, "%u\n",
- priv->preset_enable[chan->channel]);
+ !priv->preset_enable[chan->channel]);
}
static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
@@ -241,7 +241,7 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
size_t len)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- const int base_offset = priv->base + 2 * chan->channel;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
bool preset_enable;
int ret;
unsigned int ior_cfg;
@@ -250,6 +250,9 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
if (ret)
return ret;
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
priv->preset_enable[chan->channel] = preset_enable;
ior_cfg = priv->ab_enable[chan->channel] |
@@ -362,7 +365,7 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
priv->synchronous_mode[chan->channel] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
- outb(0x40 | idr_cfg, base_offset);
+ outb(0x60 | idr_cfg, base_offset);
return 0;
}
@@ -444,7 +447,7 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */
- outb(0x40 | idr_cfg, base_offset);
+ outb(0x60 | idr_cfg, base_offset);
return 0;
}
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 5355507f8fa1..c9e319bff58b 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -66,10 +66,8 @@
#define BMI160_REG_DUMMY 0x7F
-#define BMI160_ACCEL_PMU_MIN_USLEEP 3200
-#define BMI160_ACCEL_PMU_MAX_USLEEP 3800
-#define BMI160_GYRO_PMU_MIN_USLEEP 55000
-#define BMI160_GYRO_PMU_MAX_USLEEP 80000
+#define BMI160_ACCEL_PMU_MIN_USLEEP 3800
+#define BMI160_GYRO_PMU_MIN_USLEEP 80000
#define BMI160_SOFTRESET_USLEEP 1000
#define BMI160_CHANNEL(_type, _axis, _index) { \
@@ -151,20 +149,9 @@ static struct bmi160_regs bmi160_regs[] = {
},
};
-struct bmi160_pmu_time {
- unsigned long min;
- unsigned long max;
-};
-
-static struct bmi160_pmu_time bmi160_pmu_time[] = {
- [BMI160_ACCEL] = {
- .min = BMI160_ACCEL_PMU_MIN_USLEEP,
- .max = BMI160_ACCEL_PMU_MAX_USLEEP
- },
- [BMI160_GYRO] = {
- .min = BMI160_GYRO_PMU_MIN_USLEEP,
- .max = BMI160_GYRO_PMU_MIN_USLEEP,
- },
+static unsigned long bmi160_pmu_time[] = {
+ [BMI160_ACCEL] = BMI160_ACCEL_PMU_MIN_USLEEP,
+ [BMI160_GYRO] = BMI160_GYRO_PMU_MIN_USLEEP,
};
struct bmi160_scale {
@@ -289,7 +276,7 @@ int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
if (ret < 0)
return ret;
- usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max);
+ usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000);
return 0;
}
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index a144ca3461fc..81bd8e8da4a6 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -113,7 +113,7 @@ static const char max44000_int_time_avail_str[] =
"0.100 "
"0.025 "
"0.00625 "
- "0.001625";
+ "0.0015625";
/* Available scales (internal to ulux) with pretty manual alignment: */
static const int max44000_scale_avail_ulux_array[] = {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e7dcfac877ca..3e70a9c5d79d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
- if (dst_addr->sa_family == AF_INET6) {
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1e62a5f0cb28..4609b921f899 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
if (access & IB_ACCESS_ON_DEMAND) {
+ put_pid(umem->pid);
ret = ib_umem_odp_get(context, umem);
if (ret) {
kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
+ put_pid(umem->pid);
kfree(umem);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9d5fe1853da4..6262dc035f3c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
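Editor's note: the iwch_query_port() hunk above replaces the open-coded if/else ladder with ib_mtu_int_to_enum(netdev->mtu). The mapping it expresses is "largest IB MTU not exceeding the netdev MTU", sketched standalone below; the enum and helper are illustrative stand-ins, not the ib_verbs definitions:

```c
#include <stdio.h>

/* Illustrative copies of the IB MTU enumerators. */
enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

/* Largest IB MTU that still fits in the netdev MTU. */
static enum ib_mtu mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	if (mtu >= 2048)
		return IB_MTU_2048;
	if (mtu >= 1024)
		return IB_MTU_1024;
	if (mtu >= 512)
		return IB_MTU_512;
	return IB_MTU_256;
}

int main(void)
{
	printf("%d %d %d\n", mtu_int_to_enum(9000),
	       mtu_int_to_enum(1500), mtu_int_to_enum(300));
	return 0;
}
```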
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc76d2d..9398143d7c5e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
switch (ep->com.state) {
case MPA_REQ_SENT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_request(ep, skb);
break;
case FPDU_MODE: {
struct c4iw_qp_attributes attrs;
+
+ update_rx_credits(ep, dlen);
BUG_ON(!ep->com.qp);
if (status)
pr_err("%s Unexpected streaming data." \
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477af19f..bec82a600d77 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
/*
+ * Special cqe for drain WR completions...
+ */
+ if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+ *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+ *cqe = *hw_cqe;
+ goto skip_cqe;
+ }
+
+ /*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
c4iw_invalidate_mr(qhp->rhp,
CQE_WRID_FR_STAG(&cqe));
break;
+ case C4IW_DRAIN_OPCODE:
+ wc->opcode = IB_WC_SEND;
+ break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
"in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq) {
- if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
- if (t4_sq_empty(wq))
- complete(&qhp->sq_drained);
- if (t4_rq_empty(wq))
- complete(&qhp->rq_drained);
- }
+ if (wq)
spin_unlock(&qhp->lock);
- }
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 516b0ae6dc3f..40c0e7b9fc6e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
}
}
+ rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+ if (!rdev->free_workq) {
+ err = -ENOMEM;
+ goto err_free_status_page;
+ }
+
rdev->status_page->db_off = 0;
return 0;
+err_free_status_page:
+ free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
+ destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4788e1a46fde..8cd4d054a87e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,6 +45,7 @@
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
struct list_head qpids;
struct list_head cqids;
struct mutex lock;
+ struct kref kref;
};
enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
atomic_t wr_log_idx;
struct wr_log_entry *wr_log;
int wr_log_size;
+ struct workqueue_struct *free_workq;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
- struct completion rq_drained;
- struct completion sq_drained;
+ struct work_struct free_work;
+ struct c4iw_ucontext *ucontext;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
+ struct kref kref;
};
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+ kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+ kref_get(&ucontext->kref);
+}
+
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
return IB_QPS_ERR;
}
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
#endif
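
The new kref on struct c4iw_ucontext ties the context's lifetime to its users: a QP takes a reference when it is created and the deferred free work drops it, so whichever put comes last runs the release function. A minimal userspace sketch of that get/put-with-release pattern, using a plain atomic counter in place of kref (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ucontext {
	atomic_int refcount;
	/* ... mmap list, uctx state, etc. ... */
};

static struct ucontext *ucontext_alloc(void)
{
	struct ucontext *uc = calloc(1, sizeof(*uc));

	if (uc)
		atomic_store(&uc->refcount, 1);	/* like kref_init() */
	return uc;
}

static void ucontext_get(struct ucontext *uc)
{
	atomic_fetch_add(&uc->refcount, 1);	/* like kref_get() */
}

static void ucontext_put(struct ucontext *uc)
{
	/* The last put frees the object, like kref_put() calling the release fn. */
	if (atomic_fetch_sub(&uc->refcount, 1) == 1) {
		printf("releasing ucontext\n");
		free(uc);
	}
}

int main(void)
{
	struct ucontext *uc = ucontext_alloc();

	ucontext_get(uc);	/* e.g. taken when a QP is created */
	ucontext_put(uc);	/* dropped from the deferred QP free work */
	ucontext_put(uc);	/* final put from dealloc_ucontext frees it */
	return 0;
}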
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 49b51b7e0fd7..3345e1c312f7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
return -ENOSYS;
}
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
{
- struct c4iw_dev *rhp = to_c4iw_dev(context->device);
- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
- PDBG("%s context %p\n", __func__, context);
+ ucontext = container_of(kref, struct c4iw_ucontext, kref);
+ rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+ PDBG("%s ucontext %p\n", __func__, ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+ PDBG("%s context %p\n", __func__, context);
+ c4iw_put_ucontext(ucontext);
return 0;
}
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
+ kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
- dev->ibdev.drain_sq = c4iw_drain_sq;
- dev->ibdev.drain_rq = c4iw_drain_rq;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cda5542e13a2..04c1c382dedb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
return 0;
}
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_qp *qhp;
+ struct c4iw_dev *rhp;
+
+ qhp = container_of(work, struct c4iw_qp, free_work);
+ ucontext = qhp->ucontext;
+ rhp = qhp->rhp;
+
+ PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+ if (ucontext)
+ c4iw_put_ucontext(ucontext);
+ kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
{
struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref);
PDBG("%s qhp %p\n", __func__, qhp);
- kfree(qhp);
+ queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+ kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
return 0;
}
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *schp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ cq = &schp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_TYPE_V(1) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&schp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&schp->lock, flag);
+
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *rchp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ cq = &rchp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_TYPE_V(0) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&rchp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- *bad_wr = wr;
- return -EINVAL;
+ complete_sq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- *bad_wr = wr;
- return -EINVAL;
+ complete_rq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_CLOSING:
- if (!internal) {
+
+ /*
+ * Allow kernel users to move to ERROR for qp draining.
+ */
+ if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+ C4IW_QP_STATE_ERROR)) {
ret = -EINVAL;
goto out;
}
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
struct c4iw_dev *rhp;
struct c4iw_qp *qhp;
struct c4iw_qp_attributes attrs;
- struct c4iw_ucontext *ucontext;
qhp = to_c4iw_qp(ib_qp);
rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
spin_unlock_irq(&rhp->lock);
free_ird(rhp, qhp->attr.max_ird);
- ucontext = ib_qp->uobject ?
- to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
- destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
c4iw_qp_rem_ref(ib_qp);
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
- init_completion(&qhp->sq_drained);
- init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
kref_init(&qhp->kref);
+ INIT_WORK(&qhp->free_work, free_qp_work);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ma_sync_key_mm->len = PAGE_SIZE;
insert_mmap(ucontext, ma_sync_key_mm);
}
+
+ c4iw_get_ucontext(ucontext);
+ qhp->ucontext = ucontext;
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
- struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
- (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
- struct c4iw_qp *qp = to_c4iw_qp(ibqp);
- unsigned long flag;
- bool need_to_wait;
-
- move_qp_to_err(qp);
- spin_lock_irqsave(&qp->lock, flag);
- need_to_wait = !t4_sq_empty(&qp->wq);
- spin_unlock_irqrestore(&qp->lock, flag);
-
- if (need_to_wait)
- wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
- struct c4iw_qp *qp = to_c4iw_qp(ibqp);
- unsigned long flag;
- bool need_to_wait;
-
- move_qp_to_err(qp);
- spin_lock_irqsave(&qp->lock, flag);
- need_to_wait = !t4_rq_empty(&qp->wq);
- spin_unlock_irqrestore(&qp->lock, flag);
-
- if (need_to_wait)
- wait_for_completion(&qp->rq_drained);
-}
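
With the driver-private drain_sq/drain_rq hooks gone, draining a QP in error now relies on completing the posted work requests in software: the post path builds a CQE carrying the drain opcode and pushes it onto the software CQ, and poll_cq() later hands the saved cookie back as an ordinary completion. A toy sketch of that produce/poll idea (sizes and field names are invented for the sketch, not the t4 layout):

#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH 16

struct sw_cqe {
	uint64_t wr_id;
	int      is_drain;
};

struct sw_cq {
	struct sw_cqe ring[CQ_DEPTH];
	unsigned int pidx, cidx;
};

/* Queue a synthetic drain completion instead of touching the hardware queue. */
static void post_drain_cqe(struct sw_cq *cq, uint64_t wr_id)
{
	struct sw_cqe cqe = { .wr_id = wr_id, .is_drain = 1 };

	cq->ring[cq->pidx++ % CQ_DEPTH] = cqe;	/* t4_swcq_produce() analogue */
}

static int poll_cqe(struct sw_cq *cq, struct sw_cqe *out)
{
	if (cq->cidx == cq->pidx)
		return 0;
	*out = cq->ring[cq->cidx++ % CQ_DEPTH];
	return 1;
}

int main(void)
{
	struct sw_cq cq = { .pidx = 0, .cidx = 0 };
	struct sw_cqe cqe;

	post_drain_cqe(&cq, 0x1234);		/* drain WR posted to an error QP */
	while (poll_cqe(&cq, &cqe))
		printf("wr_id=0x%llx drain=%d\n",
		       (unsigned long long)cqe.wr_id, cqe.is_drain);
	return 0;
}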
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381aa83c8..640d22148a3e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,6 +179,7 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ u64 drain_cookie;
} u;
__be64 reserved;
__be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S 63
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 29e97df9e1a7..4c000d60d5c6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
if (netif_carrier_ok(iwdev->netdev))
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c8413fc120e6..7031a8dd4d14 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1682,9 +1682,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
size += ret;
}
+ if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
+ flow_attr->num_of_specs == 1) {
+ struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
+ enum ib_flow_spec_type header_spec =
+ ((union ib_flow_spec *)(flow_attr + 1))->type;
+
+ if (header_spec == IB_FLOW_SPEC_ETH)
+ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+ }
+
ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (ret == -ENOMEM)
pr_err("mcg table is full. Fail to register network rule.\n");
else if (ret == -ENXIO)
@@ -1701,7 +1711,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
int err;
err = mlx4_cmd(dev, reg_id, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
pr_err("Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index aff9fb14768b..5a31f3c6a421 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
-
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
props->lmc = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..3ac8aa5ef37d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
return 0;
}
-void qedr_unaffiliated_event(void *context,
- u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
{
pr_err("unaffiliated event not implemented yet\n");
}
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
goto sysfs_err;
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
ib_dealloc_device(&dev->ibdev);
}
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
{
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
- return 0;
+ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}
static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
qedr_remove(dev);
}
+static void qedr_open(struct qedr_dev *dev)
+{
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
static void qedr_mac_address_change(struct qedr_dev *dev)
{
union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
if (rc)
DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
switch (event) {
case QEDE_UP:
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ qedr_open(dev);
break;
case QEDE_DOWN:
qedr_close(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..bb32e4792ec9 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
struct qed_rdma_events events;
};
+#define QEDR_ENET_STATE_BIT (0)
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+
+ unsigned long enet_state;
};
#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
#define QEDR_MAX_PORT (1)
+#define QEDR_PORT (1)
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
@@ -251,9 +256,6 @@ struct qedr_cq {
u16 icid;
- /* Lock to protect completion handler */
- spinlock_t comp_handler_lock;
-
/* Lock to protect multiple CQs */
spinlock_t cq_lock;
u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890ebb72bd..a9a8d8745d2e 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
qedr_inc_sw_gsi_cons(&qp->sq);
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (cq->ibcq.comp_handler) {
- spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
- }
}
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (cq->ibcq.comp_handler) {
- spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
- }
}
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
}
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
- else
packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de208077..c7d6c9a783bd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- struct qedr_ucontext *uctx = NULL;
- struct qedr_alloc_pd_uresp uresp;
struct qedr_pd *pd;
u16 pd_id;
int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (!pd)
return ERR_PTR(-ENOMEM);
- dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ if (rc)
+ goto err;
- uresp.pd_id = pd_id;
pd->pd_id = pd_id;
if (udata && context) {
+ struct qedr_alloc_pd_uresp uresp;
+
+ uresp.pd_id = pd_id;
+
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
- if (rc)
+ if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
- uctx = get_qedr_ucontext(context);
- uctx->pd = pd;
- pd->uctx = uctx;
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+ goto err;
+ }
+
+ pd->uctx = get_qedr_ucontext(context);
+ pd->uctx->pd = pd;
}
return &pd->ibpd;
+
+err:
+ kfree(pd);
+ return ERR_PTR(rc);
}
int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
return ERR_PTR(-EFAULT);
}
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
{
switch (qp_state) {
case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
return IB_QPS_ERR;
}
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+ enum ib_qp_state qp_state)
{
switch (qp_state) {
case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
int status = 0;
if (new_state == qp->state)
- return 1;
+ return 0;
switch (qp->state) {
case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* ERR->XXX */
switch (new_state) {
case QED_ROCE_QP_STATE_RESET:
+ if ((qp->rq.prod != qp->rq.cons) ||
+ (qp->sq.prod != qp->sq.cons)) {
+ DP_NOTICE(dev,
+ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+ qp->rq.prod, qp->rq.cons, qp->sq.prod,
+ qp->sq.cons);
+ status = -EINVAL;
+ }
break;
default:
status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
qp_params.remote_mac_addr);
-;
qp_params.mtu = qp->mtu;
qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->qp_state = qedr_get_ibqp_state(params.state);
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
- qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
qp_attr->path_mig_state = IB_MIG_MIGRATED;
qp_attr->rq_psn = params.rq_psn;
qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
return rc;
}
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+ int max_page_list_len)
{
struct qedr_pd *pd = get_qedr_pd(ibpd);
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
return 0;
}
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
int wq_is_full, err_wr, pbl_is_full;
struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
return true;
}
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
IB_WC_SUCCESS, 0);
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ if (qp->state != QED_ROCE_QP_STATE_ERR)
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 231a1ce1f4be..bd8fbd3d2032 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "failed to allocate interrupts\n");
ret = -ENOMEM;
- goto err_netdevice;
+ goto err_free_cq_ring;
}
/* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
err_free_intrs:
pvrdma_free_irq(dev);
pvrdma_disable_msi_all(dev);
-err_netdevice:
- unregister_netdevice_notifier(&dev->nb_netdev);
err_free_cq_ring:
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 54891370d18a..c2aa52638dcb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
- struct pvrdma_alloc_ucontext_resp uresp;
+ struct pvrdma_alloc_ucontext_resp uresp = {0};
int ret;
void *ptr;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 342e78163613..4abdeb359fb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
}
spin_lock_bh(&dev_list_lock);
- list_add_tail(&rxe_dev_list, &rxe->list);
+ list_add_tail(&rxe->list, &rxe_dev_list);
spin_unlock_bh(&dev_list_lock);
return rxe;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 486d576e55bc..44b2108253bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
del_timer_sync(&qp->rnr_nak_timer);
rxe_cleanup_task(&qp->req.task);
- if (qp_type(qp) == IB_QPT_RC)
- rxe_cleanup_task(&qp->comp.task);
+ rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
__rxe_do_task(&qp->req.task);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9104e6b8cac9..e71af717e71b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
SHOST_DIX_GUARD_CRC);
}
- /*
- * Limit the sg_tablesize and max_sectors based on the device
- * max fastreg page list length.
- */
- shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
- ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dma_device)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+ iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+ iser_conn, shost->sg_tablesize,
+ shost->max_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c5ddb5..9d0b22ad58c1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -496,7 +496,6 @@ struct ib_conn {
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
- unsigned int scsi_max_sectors;
bool snd_w_inv;
};
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8ae7a3beddb7..6a9d1cb548ee 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
device->ib_device->attrs.max_fast_reg_page_list_len);
- if (sg_tablesize > sup_sg_tablesize) {
- sg_tablesize = sup_sg_tablesize;
- iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
- } else {
- iser_conn->scsi_max_sectors = max_sectors;
- }
-
- iser_conn->scsi_sg_tablesize = sg_tablesize;
-
- iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
- iser_conn, iser_conn->scsi_sg_tablesize,
- iser_conn->scsi_max_sectors);
+ iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
}
/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ddc07123193..79bf48477ddb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
struct srp_fr_desc *d;
struct ib_mr *mr;
int i, ret = -EINVAL;
+ enum ib_mr_type mr_type;
if (pool_size <= 0)
goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_list);
+ if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
- mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- max_page_list_len);
+ mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
indirect_sg_entries = cmd_sg_entries;
}
+ if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+ pr_warn("Clamping indirect_sg_entries to %u\n",
+ SG_MAX_SEGMENTS);
+ indirect_sg_entries = SG_MAX_SEGMENTS;
+ }
+
srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
ret = -ENOMEM;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index f3135ae22df4..abd18f31b24f 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/init.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6d9499658671..c7d5b2b643d1 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
input_dev->name = xpad->name;
input_dev->phys = xpad->phys;
usb_to_input_id(xpad->udev, &input_dev->id);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ /* x360w controllers and the receiver have different ids */
+ input_dev->id.product = 0x02a1;
+ }
+
input_dev->dev.parent = &xpad->intf->dev;
input_set_drvdata(input_dev, xpad);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index a8b0a2eec344..7fed92fb8cc1 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {
MODULE_DEVICE_TABLE(i2c, adxl34x_id);
-#ifdef CONFIG_OF
static const struct of_device_id adxl34x_of_id[] = {
/*
* The ADXL346 is backward-compatible with the ADXL345. Differences are
@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
};
MODULE_DEVICE_TABLE(of, adxl34x_of_id);
-#endif
static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.pm = &adxl34x_i2c_pm,
- .of_match_table = of_match_ptr(adxl34x_of_id),
+ .of_match_table = adxl34x_of_id,
},
.probe = adxl34x_i2c_probe,
.remove = adxl34x_i2c_remove,
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index cde6f4bd8ea2..6d279aa27cb9 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -114,7 +114,7 @@ enum SS4_PACKET_ID {
(_b[1] & 0x7F) \
)
-#define SS4_TS_Y_V2(_b) (s8)( \
+#define SS4_TS_Y_V2(_b) -(s8)( \
((_b[3] & 0x01) << 7) | \
(_b[2] & 0x7F) \
)
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index aa7c5da60800..cb2bf203f4ca 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -29,7 +29,7 @@
* after soft reset, we should wait for 1 ms
* before the device becomes operational
*/
-#define SOFT_RESET_DELAY_MS 3
+#define SOFT_RESET_DELAY_US 3000
/* and after hard reset, we should wait for max 500ms */
#define HARD_RESET_DELAY_MS 500
@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "Unable to reset device\n");
} else {
- msleep(SOFT_RESET_DELAY_MS);
+ usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
ret = synaptics_i2c_config(client);
if (ret)
dev_err(&client->dev, "Unable to config device\n");
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 30cc627a4f45..8993983e3fe4 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -41,7 +41,8 @@ config RMI4_SMB
config RMI4_F03
bool "RMI4 Function 03 (PS2 Guest)"
- depends on RMI4_CORE && SERIO
+ depends on RMI4_CORE
+ depends on SERIO=y || RMI4_CORE=SERIO
help
Say Y here if you want to add support for RMI4 function 03.
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 77551f522202..a7618776705a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
+ },
{ }
};
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 02aec284deca..3e6003d32e56 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
- if (report_count > 3) {
+ if (report_count == 0 || report_count > 3) {
dev_err(&client->dev,
- "too large report count: %*ph\n",
+ "bad report count: %*ph\n",
HEADER_SIZE, ts->buf);
break;
}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 019e02707cd5..3ef0f42984f2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1023,7 +1023,7 @@ again:
next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
left = (head - next_tail) % CMD_BUFFER_SIZE;
- if (left <= 2) {
+ if (left <= 0x20) {
struct iommu_cmd sync_cmd;
int ret;
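
The amd_iommu hunk widens the headroom check on the circular command buffer from 2 bytes to 0x20 before injecting a completion-wait. The free-space arithmetic it relies on, left = (head - next_tail) mod size, shown with illustrative numbers:

#include <stdio.h>

#define CMD_BUFFER_SIZE 4096
#define CMD_SIZE 16

int main(void)
{
	unsigned int head = 128, tail = 4080;	/* example positions */
	unsigned int next_tail = (tail + CMD_SIZE) % CMD_BUFFER_SIZE;
	unsigned int left = (head - next_tail) % CMD_BUFFER_SIZE;

	/* With the patch, a sync command is injected once left <= 0x20
	 * rather than waiting until only 2 bytes of headroom remain. */
	printf("next_tail=%u left=%u\n", next_tail, left);
	return 0;
}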
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index a88576d50740..8ccbd7023194 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -903,8 +903,10 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- acpi_put_table(dmar_tbl);
- dmar_tbl = NULL;
+ if (dmar_tbl) {
+ acpi_put_table(dmar_tbl);
+ dmar_tbl = NULL;
+ }
up_write(&dmar_global_lock);
return ret ? 1 : -ENODEV;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c66c273dfd8a..8a185250ae5a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2037,6 +2037,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_present(context))
goto out_unlock;
+ /*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry anymore
+ * hereafter.
+ */
+ if (context_copied(context)) {
+ u16 did_old = context_domain_id(context);
+
+ if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+ iommu->flush.flush_context(iommu, did_old,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ }
+
pgd = domain->pgd;
context_clear_entry(context);
@@ -5185,6 +5204,25 @@ static void intel_iommu_remove_device(struct device *dev)
}
#ifdef CONFIG_INTEL_IOMMU_SVM
+#define MAX_NR_PASID_BITS (20)
+static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+{
+ /*
+ * Convert ecap_pss to extend context entry pts encoding, also
+ * respect the soft pasid_max value set by the iommu.
+ * - number of PASID bits = ecap_pss + 1
+ * - number of PASID table entries = 2^(pts + 5)
+ * Therefore, pts = ecap_pss - 4
+ * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
+ */
+ if (ecap_pss(iommu->ecap) < 5)
+ return 0;
+
+ /* pasid_max is encoded as actual number of entries not the bits */
+ return find_first_bit((unsigned long *)&iommu->pasid_max,
+ MAX_NR_PASID_BITS) - 5;
+}
+
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
struct device_domain_info *info;
@@ -5217,7 +5255,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
if (!(ctx_lo & CONTEXT_PASIDE)) {
context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
- context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
+ context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
+ intel_iommu_get_pts(iommu);
+
wmb();
/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
* extended to permit requests-with-PASID if the PASIDE bit
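
The pts encoding in the comment above boils down to pts = ecap_pss - 4 (further clamped by the soft pasid_max limit, which this sketch omits). A worked check of the KBL example, purely illustrative:

#include <stdio.h>

/* pts encoding from the comment: PASID bits = ecap_pss + 1,
 * table entries = 2^(pts + 5), hence pts = ecap_pss - 4.
 * The pasid_max clamp used by the driver is left out here. */
static unsigned int pts_from_pss(unsigned int ecap_pss)
{
	if (ecap_pss < 5)
		return 0;
	return ecap_pss - 4;
}

int main(void)
{
	/* KBL: ecap_pss = 0x13 -> 20 PASID bits -> 2^20 entries -> pts = 15 */
	printf("pts = %u\n", pts_from_pss(0x13));
	return 0;
}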
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 1a1d99704fe6..296f1411fe84 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
((CAPI_MSG *) msg)->header.ncci = 0;
((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
- PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+ ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+ ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
if (w != _QUEUE_FULL)
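
The PUT_WORD() call above is replaced by two explicit byte stores, low byte into structs[1] and high byte into structs[2]. The same little-endian split as a standalone snippet (the value used below is arbitrary, not LI_REQ_SILENT_UPDATE):

#include <stdint.h>
#include <stdio.h>

static void put_word_le(uint8_t *p, uint16_t w)
{
	p[0] = w & 0xff;	/* low byte  -> structs[1] in the patch */
	p[1] = w >> 8;		/* high byte -> structs[2] in the patch */
}

int main(void)
{
	uint8_t buf[2];

	put_word_le(buf, 0x0203);
	printf("%02x %02x\n", buf[0], buf[1]);	/* prints: 03 02 */
	return 0;
}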
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 82821ee0d57f..01175dac0db6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
+ /*
+ * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
+ * up mddev->thread. It is important to initialize critical
+ * resources for mddev->thread BEFORE calling pers->run().
+ */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e38936d05df1..2a514036a83d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
MD_CLOSING, /* If set, we are closing the array, do not open
@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
+
+/* clear unsupported mddev_flags */
+static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
+ unsigned long unsupported_flags)
+{
+ mddev->flags &= ~unsupported_flags;
+}
#endif /* _MD_MD_H */
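
mddev_clear_unsupported_flags() lets each personality strip a whole mask of flags in one step instead of per-flag clear_bit() calls, as the raid0/raid1 hunks below do with UNSUPPORTED_MDDEV_FLAGS. The underlying bit trick, with made-up bit positions:

#include <stdio.h>

#define HAS_JOURNAL        (1UL << 3)	/* bit numbers invented for the sketch */
#define JOURNAL_CLEAN      (1UL << 4)
#define FAILFAST_SUPPORTED (1UL << 5)

#define UNSUPPORTED_FLAGS  (HAS_JOURNAL | JOURNAL_CLEAN | FAILFAST_SUPPORTED)

int main(void)
{
	unsigned long flags = HAS_JOURNAL | FAILFAST_SUPPORTED | (1UL << 0);

	flags &= ~UNSUPPORTED_FLAGS;	/* mddev_clear_unsupported_flags() analogue */
	printf("remaining flags: %#lx\n", flags);	/* only bit 0 survives */
	return 0;
}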
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a162fedeb51a..848365d474f3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,6 +26,11 @@
#include "raid0.h"
#include "raid5.h"
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN) | \
+ (1L << MD_FAILFAST_SUPPORTED))
+
static int raid0_congested(struct mddev *mddev, int bits)
{
struct r0conf *conf = mddev->private;
@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_HAS_JOURNAL, &mddev->flags);
- clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a1f3fbed9100..7b0f647bcccb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -42,6 +42,10 @@
#include "raid1.h"
#include "bitmap.h"
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN))
+
/*
* Number of guaranteed r1bios in case of extreme VM load:
*/
@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ struct r1bio *r1_bio)
{
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
- struct r1bio *r1_bio;
struct bio *read_bio;
+ struct bitmap *bitmap = mddev->bitmap;
+ const int op = bio_op(bio);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ int sectors_handled;
+ int max_sectors;
+ int rdisk;
+
+ wait_barrier(conf, bio);
+
+read_again:
+ rdisk = read_balance(conf, r1_bio, &max_sectors);
+
+ if (rdisk < 0) {
+ /* couldn't find anywhere to read from */
+ raid_end_bio_io(r1_bio);
+ return;
+ }
+ mirror = conf->mirrors + rdisk;
+
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /*
+ * Reading from a write-mostly device must take care not to
+ * over-take any writes that are 'behind'
+ */
+ raid1_log(mddev, "wait behind writes");
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
+ r1_bio->read_disk = rdisk;
+ r1_bio->start_next_window = 0;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
+
+ r1_bio->bios[rdisk] = read_bio;
+
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
+ read_bio->bi_bdev = mirror->rdev->bdev;
+ read_bio->bi_end_io = raid1_end_read_request;
+ bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r1_bio;
+
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
+ if (max_sectors < r1_bio->sectors) {
+ /*
+ * could not read all from this device, so we will need another
+ * r1_bio.
+ */
+ sectors_handled = (r1_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * Cannot call generic_make_request directly as that will be
+ * queued in __make_request and subsequent mempool_alloc might
+ * block waiting for it. So hand bio over to raid1d.
+ */
+ reschedule_retry(r1_bio);
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ struct r1bio *r1_bio)
+{
+ struct r1conf *conf = mddev->private;
int i, disks;
- struct bitmap *bitmap;
+ struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
const int op = bio_op(bio);
- const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_opf &
(REQ_PREFLUSH | REQ_FUA));
@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
- if (bio_data_dir(bio) == WRITE &&
- ((bio_end_sector(bio) > mddev->suspend_lo &&
+ if ((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
- /* As the suspend_* range is controlled by
- * userspace, we want an interruptible
- * wait.
+ bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+
+ /*
+ * As the suspend_* range is controlled by userspace, we want
+ * an interruptible wait.
*/
DEFINE_WAIT(w);
for (;;) {
@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
!md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio))))
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio))))
break;
schedule();
}
finish_wait(&conf->wait_barrier, &w);
}
-
start_next_window = wait_barrier(conf, bio);
- bitmap = mddev->bitmap;
-
- /*
- * make_request() can abort the operation when read-ahead is being
- * used and no empty request is available.
- *
- */
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio);
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_iter.bi_sector;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r1_bio and no locking
- * will be needed when requests complete. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- if (rw == READ) {
- /*
- * read balancing logic:
- */
- int rdisk;
-
-read_again:
- rdisk = read_balance(conf, r1_bio, &max_sectors);
-
- if (rdisk < 0) {
- /* couldn't find anywhere to read from */
- raid_end_bio_io(r1_bio);
- return;
- }
- mirror = conf->mirrors + rdisk;
-
- if (test_bit(WriteMostly, &mirror->rdev->flags) &&
- bitmap) {
- /* Reading from a write-mostly device must
- * take care not to over-take any writes
- * that are 'behind'
- */
- raid1_log(mddev, "wait behind writes");
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
- }
- r1_bio->read_disk = rdisk;
- r1_bio->start_next_window = 0;
-
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
- max_sectors);
-
- r1_bio->bios[rdisk] = read_bio;
-
- read_bio->bi_iter.bi_sector = r1_bio->sector +
- mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
- read_bio->bi_end_io = raid1_end_read_request;
- bio_set_op_attrs(read_bio, op, do_sync);
- if (test_bit(FailFast, &mirror->rdev->flags) &&
- test_bit(R1BIO_FailFast, &r1_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
- read_bio->bi_private = r1_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
-
- if (max_sectors < r1_bio->sectors) {
- /* could not read all from this device, so we will
- * need another r1_bio.
- */
-
- sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_iter.bi_sector);
- r1_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __make_request
- * and subsequent mempool_alloc might block waiting
- * for it. So hand bio over to raid1d.
- */
- reschedule_retry(r1_bio);
-
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio) - sectors_handled;
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_iter.bi_sector +
- sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
- return;
- }
-
- /*
- * WRITE:
- */
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
raid1_log(mddev, "wait queued");
@@ -1280,8 +1261,7 @@ read_again:
int bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, r1_bio->sector,
- max_sectors,
+ is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
/* mustn't write here until the bad block is
@@ -1370,7 +1350,8 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1464,6 +1445,40 @@ read_again:
wake_up(&conf->wait_barrier);
}
+static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r1conf *conf = mddev->private;
+ struct r1bio *r1_bio;
+
+ /*
+ * make_request() can abort the operation when read-ahead is being
+ * used and no empty request is available.
+ *
+ */
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio);
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector;
+
+ /*
+ * We might need to issue multiple reads to different devices if there
+ * are bad blocks around, so we keep track of the number of reads in
+ * bio->bi_phys_segments. If this is 0, there is only one r1_bio and
+ * no locking will be needed when requests complete. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ bio_clear_flag(bio, BIO_SEG_VALID);
+
+ if (bio_data_dir(bio) == READ)
+ raid1_read_request(mddev, bio, r1_bio);
+ else
+ raid1_write_request(mddev, bio, r1_bio);
+}
+
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
struct r1conf *conf = mddev->private;
@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
- clear_bit(MD_HAS_JOURNAL, &mddev->flags);
- clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
}
return conf;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab5e86209322..1920756828df 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void __make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
- struct r10bio *r10_bio;
struct bio *read_bio;
+ const int op = bio_op(bio);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ int sectors_handled;
+ int max_sectors;
+ sector_t sectors;
+ struct md_rdev *rdev;
+ int slot;
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ sectors = bio_sectors(bio);
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+ /*
+ * IO spans the reshape position. Need to wait for reshape to
+ * pass
+ */
+ raid10_log(conf->mddev, "wait reshape");
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
+ wait_barrier(conf);
+ }
+
+read_again:
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (!rdev) {
+ raid_end_bio_io(r10_bio);
+ return;
+ }
+ slot = r10_bio->read_slot;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
+
+ r10_bio->devs[slot].bio = read_bio;
+ r10_bio->devs[slot].rdev = rdev;
+
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+ choose_data_offset(r10_bio, rdev);
+ read_bio->bi_bdev = rdev->bdev;
+ read_bio->bi_end_io = raid10_end_read_request;
+ bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r10_bio;
+
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
+ if (max_sectors < r10_bio->sectors) {
+ /*
+ * Could not read all from this device, so we will need another
+ * r10_bio.
+ */
+ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /*
+ * Cannot call generic_make_request directly as that will be
+ * queued in __generic_make_request and subsequent
+ * mempool_alloc might block waiting for it, so hand bio over
+ * to raid10d.
+ */
+ reschedule_retry(r10_bio);
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
+ r10_bio->state = 0;
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
+ return;
+}
+
+static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ struct r10bio *r10_bio)
+{
+ struct r10conf *conf = mddev->private;
int i;
const int op = bio_op(bio);
- const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid10_plug_cb *plug = NULL;
+ sector_t sectors;
int sectors_handled;
int max_sectors;
- int sectors;
md_write_start(mddev, bio);
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_iter.bi_sector < conf->reshape_progress &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /* IO spans the reshape position. Need to wait for
- * reshape to pass
+ /*
+ * IO spans the reshape position. Need to wait for reshape to
+ * pass
*/
raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
sectors);
wait_barrier(conf);
}
+
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
conf->reshape_safe = mddev->reshape_position;
}
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
- r10_bio->master_bio = bio;
- r10_bio->sectors = sectors;
-
- r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_iter.bi_sector;
- r10_bio->state = 0;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r10_bio and no locking
- * will be needed when the request completes. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- if (rw == READ) {
- /*
- * read balancing logic:
- */
- struct md_rdev *rdev;
- int slot;
-
-read_again:
- rdev = read_balance(conf, r10_bio, &max_sectors);
- if (!rdev) {
- raid_end_bio_io(r10_bio);
- return;
- }
- slot = r10_bio->read_slot;
-
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
- max_sectors);
-
- r10_bio->devs[slot].bio = read_bio;
- r10_bio->devs[slot].rdev = rdev;
-
- read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
- choose_data_offset(r10_bio, rdev);
- read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_end_io = raid10_end_read_request;
- bio_set_op_attrs(read_bio, op, do_sync);
- if (test_bit(FailFast, &rdev->flags) &&
- test_bit(R10BIO_FailFast, &r10_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
- read_bio->bi_private = r10_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r10_bio->sector);
- if (max_sectors < r10_bio->sectors) {
- /* Could not read all from this device, so we will
- * need another r10_bio.
- */
- sectors_handled = (r10_bio->sector + max_sectors
- - bio->bi_iter.bi_sector);
- r10_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __generic_make_request
- * and subsequent mempool_alloc might block
- * waiting for it. so hand bio over to raid10d.
- */
- reschedule_retry(r10_bio);
-
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
- r10_bio->master_bio = bio;
- r10_bio->sectors = bio_sectors(bio) - sectors_handled;
- r10_bio->state = 0;
- r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_iter.bi_sector +
- sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
- return;
- }
-
- /*
- * WRITE:
- */
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
int bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, dev_sector,
- max_sectors,
+ is_bad = is_badblock(rdev, dev_sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
/* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
r10_bio->devs[i].bio = mbio;
mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio,
- rdev));
+ choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
r10_bio->devs[i].repl_bio = mbio;
mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
- choose_data_offset(
- r10_bio, rdev));
+ choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
one_write_done(r10_bio);
}
+static void __make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ struct r10bio *r10_bio;
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = bio_sectors(bio);
+
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector;
+ r10_bio->state = 0;
+
+ /*
+ * We might need to issue multiple reads to different devices if there
+ * are bad blocks around, so we keep track of the number of reads in
+ * bio->bi_phys_segments. If this is 0, there is only one r10_bio and
+ * no locking will be needed when the request completes. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ bio_clear_flag(bio, BIO_SEG_VALID);
+
+ if (bio_data_dir(bio) == READ)
+ raid10_read_request(mddev, bio, r10_bio);
+ else
+ raid10_write_request(mddev, bio, r10_bio);
+}
+
static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
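
raid10_read_request() above only stalls an I/O while it straddles conf->reshape_progress; a request wholly before or wholly after the boundary goes straight through. A self-contained C sketch of that interval test, assuming the boundary is a single forward-moving sector value (an illustration, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* An I/O of 'len' sectors starting at 'start' must wait only while it
 * spans the reshape boundary, i.e. starts below it and ends above it. */
static bool io_spans_reshape(sector_t start, sector_t len, sector_t progress)
{
	return start < progress && start + len > progress;
}

int main(void)
{
	sector_t progress = 1024;

	printf("%d\n", io_spans_reshape(1000, 16, progress));	/* 0: ends at 1016 */
	printf("%d\n", io_spans_reshape(1020, 16, progress));	/* 1: crosses 1024 */
	printf("%d\n", io_spans_reshape(1024, 16, progress));	/* 0: starts at boundary */
	return 0;
}
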
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index d7bfb6fc8aef..302dea3296ba 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -162,6 +162,8 @@ struct r5l_log {
/* to submit async io_units, to fulfill ordering of flush */
struct work_struct deferred_io_work;
+ /* to disable write back in degraded mode */
+ struct work_struct disable_writeback_work;
};
/*
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
r5l_do_submit_io(log, io);
}
+static void r5c_disable_writeback_async(struct work_struct *work)
+{
+ struct r5l_log *log = container_of(work, struct r5l_log,
+ disable_writeback_work);
+ struct mddev *mddev = log->rdev->mddev;
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+ return;
+ pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
+ mdname(mddev));
+ mddev_suspend(mddev);
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ mddev_resume(mddev);
+}
+
static void r5l_submit_current_io(struct r5l_log *log)
{
struct r5l_io_unit *io = log->current_io;
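
r5c_disable_writeback_async() above runs as a work item because the mode switch wraps mddev_suspend()/mddev_resume(), so it is deferred rather than done inline in the error path that spots the failed device (it is scheduled from r5c_update_on_rdev_error() further down in this patch). The policy itself is a one-way transition, sketched below as a small userspace C model; the worker and suspend machinery are left out and the names are made up for illustration.

#include <stdio.h>

enum journal_mode { WRITE_THROUGH, WRITE_BACK };

/* Decide the journal mode after a device failure: a degraded array must
 * not keep caching writes in the journal, so write-back is demoted to
 * write-through.  A healthy array keeps whatever mode it had. */
static enum journal_mode mode_after_error(enum journal_mode cur, int degraded)
{
	if (degraded > 0 && cur == WRITE_BACK)
		return WRITE_THROUGH;
	return cur;
}

int main(void)
{
	printf("%d\n", mode_after_error(WRITE_BACK, 1));	/* 0: demoted */
	printf("%d\n", mode_after_error(WRITE_BACK, 0));	/* 1: unchanged */
	printf("%d\n", mode_after_error(WRITE_THROUGH, 1));	/* 0: already safe */
	return 0;
}
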
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
next_checkpoint = r5c_calculate_new_cp(conf);
spin_unlock_irq(&log->io_list_lock);
- BUG_ON(reclaimable < 0);
-
if (reclaimable == 0 || !write_super)
return;
@@ -1682,8 +1697,7 @@ out:
static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
- sector_t stripe_sect,
- sector_t log_start)
+ sector_t stripe_sect)
{
struct stripe_head *sh;
@@ -1692,7 +1706,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
return NULL; /* no more stripe available */
r5l_recovery_reset_stripe(sh);
- sh->log_start = log_start;
return sh;
}
@@ -1862,7 +1875,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);
if (!sh) {
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@@ -1871,7 +1884,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect, ctx->pos);
+ conf, stripe_sect);
}
if (!sh) {
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
@@ -1879,8 +1892,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
conf->min_nr_stripes * 2);
raid5_set_cache_size(mddev,
conf->min_nr_stripes * 2);
- sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect, ctx->pos);
+ sh = r5c_recovery_alloc_stripe(conf,
+ stripe_sect);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
@@ -1894,7 +1907,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
r5l_recovery_replay_one_stripe(conf, sh, ctx);
- sh->log_start = ctx->pos;
list_move_tail(&sh->lru, cached_stripe_list);
}
r5l_recovery_load_data(log, sh, ctx, payload,
@@ -1933,8 +1945,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
set_bit(R5_UPTODATE, &dev->flags);
}
}
- list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
- atomic_inc(&log->stripe_in_journal_count);
}
/*
@@ -2067,9 +2077,10 @@ static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
struct r5l_recovery_ctx *ctx)
{
- struct stripe_head *sh, *next;
+ struct stripe_head *sh;
struct mddev *mddev = log->rdev->mddev;
struct page *page;
+ sector_t next_checkpoint = MaxSector;
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -2078,7 +2089,9 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
return -ENOMEM;
}
- list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ WARN_ON(list_empty(&ctx->cached_list));
+
+ list_for_each_entry(sh, &ctx->cached_list, lru) {
struct r5l_meta_block *mb;
int i;
int offset;
@@ -2123,14 +2136,42 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
REQ_OP_WRITE, REQ_FUA, false);
sh->log_start = ctx->pos;
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
ctx->pos = write_pos;
ctx->seq += 1;
+ next_checkpoint = sh->log_start;
+ }
+ log->next_checkpoint = next_checkpoint;
+ __free_page(page);
+ return 0;
+}
+
+static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
+ struct r5l_recovery_ctx *ctx)
+{
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ struct stripe_head *sh, *next;
+
+ if (ctx->data_only_stripes == 0)
+ return;
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
+
+ list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+ r5c_make_stripe_write_out(sh);
+ set_bit(STRIPE_HANDLE, &sh->state);
list_del_init(&sh->lru);
raid5_release_stripe(sh);
}
- __free_page(page);
- return 0;
+
+ md_wakeup_thread(conf->mddev->thread);
+ /* reuse conf->wait_for_quiescent in recovery */
+ wait_event(conf->wait_for_quiescent,
+ atomic_read(&conf->active_stripes) == 0);
+
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
}
static int r5l_recovery_log(struct r5l_log *log)
@@ -2139,7 +2180,6 @@ static int r5l_recovery_log(struct r5l_log *log)
struct r5l_recovery_ctx ctx;
int ret;
sector_t pos;
- struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
@@ -2160,35 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
pos = ctx.pos;
ctx.seq += 10000;
- if (ctx.data_only_stripes == 0) {
- log->next_checkpoint = ctx.pos;
- r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
- ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- } else {
- sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
- log->next_checkpoint = sh->log_start;
- }
if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
pr_debug("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
- else {
- pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+ else
+ pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx.data_only_stripes,
ctx.data_parity_stripes);
- if (ctx.data_only_stripes > 0)
- if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
- pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
- mdname(mddev));
- return -EIO;
- }
+ if (ctx.data_only_stripes == 0) {
+ log->next_checkpoint = ctx.pos;
+ r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+ ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+ } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+ pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+ mdname(mddev));
+ return -EIO;
}
log->log_start = ctx.pos;
log->seq = ctx.seq;
log->last_checkpoint = pos;
r5l_write_super(log, pos);
+
+ r5c_recovery_flush_data_only_stripes(log, &ctx);
return 0;
}
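
ctx.pos in the hunk above is advanced with r5l_ring_add(), which is not shown in this section but, by the usual journal-device semantics, wraps the position around the end of the log. A sketch of that wrap-around arithmetic under that assumption (illustrative, not the driver's code):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Advance a position on a circular journal device: positions run from
 * 0 to device_size - 1 and wrap when an increment crosses the end. */
static sector_t ring_add(sector_t device_size, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= device_size)
		start -= device_size;
	return start;
}

int main(void)
{
	sector_t size = 8192, pos = 8000;

	pos = ring_add(size, pos, 8);		/* 8008: still inside the ring */
	pos = ring_add(size, pos, 256);		/* 8264 wraps around to 72 */
	printf("pos = %llu\n", pos);		/* 72 */
	return 0;
}
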
@@ -2250,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
val > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ if (raid5_calc_degraded(conf) > 0 &&
+ val == R5C_JOURNAL_MODE_WRITE_BACK)
+ return -EINVAL;
+
mddev_suspend(mddev);
conf->log->r5c_journal_mode = val;
mddev_resume(mddev);
@@ -2304,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
set_bit(STRIPE_R5C_CACHING, &sh->state);
}
+ /*
+ * When run in degraded mode, the array is set to write-through mode.
+ * This check helps drain pending writes safely in the transition to
+ * write-through mode.
+ */
+ if (s->failed) {
+ r5c_make_stripe_write_out(sh);
+ return -EAGAIN;
+ }
+
for (i = disks; i--; ) {
dev = &sh->dev[i];
/* if non-overwrite, use writing-out phase */
@@ -2354,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
struct page *p = sh->dev[i].orig_page;
sh->dev[i].orig_page = sh->dev[i].page;
+ clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
if (!using_disk_info_extra_page)
put_page(p);
}
@@ -2418,9 +2470,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
if (do_wakeup)
wake_up(&conf->wait_for_overlap);
- if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
- return;
-
spin_lock_irq(&conf->log->stripe_in_journal_lock);
list_del_init(&sh->r5c);
spin_unlock_irq(&conf->log->stripe_in_journal_lock);
@@ -2561,6 +2610,19 @@ ioerr:
return ret;
}
+void r5c_update_on_rdev_error(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+ struct r5l_log *log = conf->log;
+
+ if (!log)
+ return;
+
+ if (raid5_calc_degraded(conf) > 0 &&
+ conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ schedule_work(&log->disable_writeback_work);
+}
+
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -2633,20 +2695,23 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->no_space_stripes_lock);
INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+ INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
INIT_LIST_HEAD(&log->stripe_in_journal_list);
spin_lock_init(&log->stripe_in_journal_lock);
atomic_set(&log->stripe_in_journal_count, 0);
+ rcu_assign_pointer(conf->log, log);
+
if (r5l_load_log(log))
goto error;
- rcu_assign_pointer(conf->log, log);
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
error:
+ rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
mempool_destroy(log->meta_pool);
@@ -2663,6 +2728,7 @@ io_kc:
void r5l_exit_log(struct r5l_log *log)
{
+ flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
mempool_destroy(log->meta_pool);
bioset_free(log->bs);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 06d7279bdd04..3c7e106c12a2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -62,6 +62,8 @@
#include "raid0.h"
#include "bitmap.h"
+#define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED)
+
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
@@ -554,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
-static int calc_degraded(struct r5conf *conf)
+int raid5_calc_degraded(struct r5conf *conf)
{
int degraded, degraded2;
int i;
@@ -617,7 +619,7 @@ static int has_failed(struct r5conf *conf)
if (conf->mddev->reshape_position == MaxSector)
return conf->mddev->degraded > conf->max_degraded;
- degraded = calc_degraded(conf);
+ degraded = raid5_calc_degraded(conf);
if (degraded > conf->max_degraded)
return 1;
return 0;
@@ -1013,7 +1015,17 @@ again:
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
- sh->dev[i].vec.bv_page = sh->dev[i].page;
+
+ if (!op_is_write(op) &&
+ test_bit(R5_InJournal, &sh->dev[i].flags))
+ /*
+ * issuing read for a page in journal, this
+ * must be preparing for prexor in rmw; read
+ * the data into orig_page
+ */
+ sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
+ else
+ sh->dev[i].vec.bv_page = sh->dev[i].page;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
@@ -2378,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+ if (test_bit(R5_InJournal, &sh->dev[i].flags))
+ /*
+ * end read for a page in journal, this
+ * must be preparing for prexor in rmw
+ */
+ set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
if (atomic_read(&rdev->read_errors))
atomic_set(&rdev->read_errors, 0);
} else {
@@ -2536,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
spin_lock_irqsave(&conf->device_lock, flags);
clear_bit(In_sync, &rdev->flags);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
@@ -2550,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
bdevname(rdev->bdev, b),
mdname(mddev),
conf->raid_disks - mddev->degraded);
+ r5c_update_on_rdev_error(mddev);
}
/*
@@ -2878,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
return r_sector;
}
+/*
+ * There are cases where we want handle_stripe_dirtying() and
+ * schedule_reconstruction() to delay towrite to some dev of a stripe.
+ *
+ * This function checks whether we want to delay the towrite. Specifically,
+ * we delay the towrite when:
+ *
+ * 1. degraded stripe has a non-overwrite to the missing dev, AND this
+ * stripe has data in journal (for other devices).
+ *
+ * In this case, when reading data for the non-overwrite dev, it is
+ * necessary to handle complex rmw of write back cache (prexor with
+ * orig_page, and xor with page). To keep read path simple, we would
+ * like to flush data in journal to RAID disks first, so complex rmw
+ * is handled in the write path (handle_stripe_dirtying).
+ *
+ */
+static inline bool delay_towrite(struct r5dev *dev,
+ struct stripe_head_state *s)
+{
+ return !test_bit(R5_OVERWRITE, &dev->flags) &&
+ !test_bit(R5_Insync, &dev->flags) && s->injournal;
+}
+
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
@@ -2898,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->towrite) {
+ if (dev->towrite && !delay_towrite(dev, s)) {
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantdrain, &dev->flags);
if (!expand)
@@ -3293,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
return rv;
}
-/* fetch_block - checks the given member device to see if its data needs
- * to be read or computed to satisfy a request.
- *
- * Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill to continue
- */
-
static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
int disk_idx, int disks)
{
@@ -3390,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
return 0;
}
+/* fetch_block - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill to continue
+ */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
int disk_idx, int disks)
{
@@ -3476,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
* midst of changing due to a write
*/
if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
- !sh->reconstruct_state)
+ !sh->reconstruct_state) {
+
+ /*
+ * For degraded stripe with data in journal, do not handle
+ * read requests yet, instead, flush the stripe to raid
+ * disks first, this avoids handling complex rmw of write
+ * back cache (prexor with orig_page, and then xor with
+ * page) in the read path
+ */
+ if (s->injournal && s->failed) {
+ if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+ r5c_make_stripe_write_out(sh);
+ goto out;
+ }
+
for (i = disks; i--; )
if (fetch_block(sh, s, i, disks))
break;
+ }
+out:
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -3592,6 +3651,21 @@ unhash:
break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
}
+/*
+ * For RMW in write back cache, we need an extra page in prexor to store the
+ * old data. This page is stored in dev->orig_page.
+ *
+ * This function checks whether we have data for prexor. The exact logic
+ * is:
+ * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
+ */
+static inline bool uptodate_for_rmw(struct r5dev *dev)
+{
+ return (test_bit(R5_UPTODATE, &dev->flags)) &&
+ (!test_bit(R5_InJournal, &dev->flags) ||
+ test_bit(R5_OrigPageUPTDODATE, &dev->flags));
+}
+
static int handle_stripe_dirtying(struct r5conf *conf,
struct stripe_head *sh,
struct stripe_head_state *s,
@@ -3620,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+ if (((dev->towrite && !delay_towrite(dev, s)) ||
+ i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !((test_bit(R5_UPTODATE, &dev->flags) &&
- (!test_bit(R5_InJournal, &dev->flags) ||
- dev->page != dev->orig_page)) ||
+ !(uptodate_for_rmw(dev) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
@@ -3637,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rcw++;
@@ -3687,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite ||
+ if (((dev->towrite && !delay_towrite(dev, s)) ||
i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !((test_bit(R5_UPTODATE, &dev->flags) &&
- (!test_bit(R5_InJournal, &dev->flags) ||
- dev->page != dev->orig_page)) ||
+ !(uptodate_for_rmw(dev) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (test_bit(STRIPE_PREREAD_ACTIVE,
@@ -3720,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_InJournal, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
@@ -7023,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
/*
* 0 for a fully functional array, 1 or 2 for a degraded array.
*/
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
if (has_failed(conf)) {
pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
@@ -7270,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
}
}
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
return count;
@@ -7630,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
* pre and post number of devices.
*/
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
mddev->raid_disks = conf->raid_disks;
@@ -7718,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
} else {
int d;
spin_lock_irq(&conf->device_lock);
- mddev->degraded = calc_degraded(conf);
+ mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irq(&conf->device_lock);
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
@@ -7829,8 +7898,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_chunk_sectors = chunksect;
ret = setup_conf(mddev);
- if (!IS_ERR_VALUE(ret))
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ if (!IS_ERR(ret))
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
return ret;
}
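
Two of the new raid5.c comments boil down to small boolean predicates: uptodate_for_rmw() (old data may feed the prexor step only if it is up to date and, when it sits in the journal, orig_page holds the on-disk copy) and delay_towrite() (hold back a non-overwrite to a missing device while the stripe still has data in the journal). The first is sketched below with plain flag bits in userspace C; the flag names mirror the patch, the setup is hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define F_UPTODATE		(1u << 0)
#define F_IN_JOURNAL		(1u << 1)
#define F_ORIG_UPTODATE		(1u << 2)	/* orig_page holds latest on-disk data */

/* Old data can feed the prexor step of read-modify-write only when the
 * device is up to date and, if its data currently sits in the journal,
 * the separately kept orig_page copy is also up to date. */
static bool uptodate_for_rmw(unsigned int flags)
{
	return (flags & F_UPTODATE) &&
	       (!(flags & F_IN_JOURNAL) || (flags & F_ORIG_UPTODATE));
}

int main(void)
{
	printf("%d\n", uptodate_for_rmw(F_UPTODATE));				/* 1 */
	printf("%d\n", uptodate_for_rmw(F_UPTODATE | F_IN_JOURNAL));		/* 0 */
	printf("%d\n", uptodate_for_rmw(F_UPTODATE | F_IN_JOURNAL |
					F_ORIG_UPTODATE));			/* 1 */
	return 0;
}
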
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index ed8e1362ab36..1440fa26e296 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -322,6 +322,11 @@ enum r5dev_flags {
* data and parity being written are in the journal
* device
*/
+ R5_OrigPageUPTDODATE, /* with write back cache, we read old data into
+ * dev->orig_page for prexor. When this flag is
+ * set, orig_page contains the latest data on the
+ * raid disk.
+ */
};
/*
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce);
+extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
extern void r5l_exit_log(struct r5l_log *log);
extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
extern void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
+extern void r5c_update_on_rdev_error(struct mddev *mddev);
#endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 0ea4efb3de66..ebb5e391b800 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -30,8 +30,9 @@
#include "cec-priv.h"
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx);
/*
* 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
/* Mark it as an error */
data->msg.tx_ts = ktime_get_ns();
- data->msg.tx_status = CEC_TX_STATUS_ERROR |
- CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_error_cnt++;
data->attempts = 0;
- data->msg.tx_error_cnt = 1;
/* Queue transmitted message for monitoring purposes */
cec_queue_msg_monitor(data->adap, &data->msg, 1);
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
[CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
[CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
[CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
- [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+ [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
[CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
};
@@ -1250,30 +1251,49 @@ configured:
for (i = 1; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
adap->is_configured = true;
adap->is_configuring = false;
cec_post_state_event(adap);
- mutex_unlock(&adap->lock);
+ /*
+ * Now post the Report Features and Report Physical Address broadcast
+ * messages. Note that these are non-blocking transmits, meaning that
+ * they are just queued up and once adap->lock is unlocked the main
+ * thread will kick in and start transmitting these.
+ *
+ * If after this function is done (but before one or more of these
+ * messages are actually transmitted) the CEC adapter is unconfigured,
+ * then any remaining messages will be dropped by the main thread.
+ */
for (i = 0; i < las->num_log_addrs; i++) {
+ struct cec_msg msg = {};
+
if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;
- /*
- * Report Features must come first according
- * to CEC 2.0
- */
- if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
- cec_report_features(adap, i);
- cec_report_phys_addr(adap, i);
+ msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+ /* Report Features must come first according to CEC 2.0 */
+ if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+ cec_fill_msg_report_features(adap, &msg, i);
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
+ }
+
+ /* Report Physical Address */
+ cec_msg_report_physical_addr(&msg, adap->phys_addr,
+ las->primary_device_type[i]);
+ dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+ las->log_addr[i],
+ cec_phys_addr_exp(adap->phys_addr));
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
}
- for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
- las->log_addr[i] = CEC_LOG_ADDR_INVALID;
- mutex_lock(&adap->lock);
adap->kthread_config = NULL;
- mutex_unlock(&adap->lock);
complete(&adap->config_completion);
+ mutex_unlock(&adap->lock);
return 0;
unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
/* High-level core CEC message handling */
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx)
{
- struct cec_msg msg = { };
const struct cec_log_addrs *las = &adap->log_addrs;
const u8 *features = las->features[la_idx];
bool op_is_dev_features = false;
unsigned int idx;
- /* This is 2.0 and up only */
- if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
- return 0;
-
/* Report Features */
- msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
- msg.len = 4;
- msg.msg[1] = CEC_MSG_REPORT_FEATURES;
- msg.msg[2] = adap->log_addrs.cec_version;
- msg.msg[3] = las->all_device_types[la_idx];
+ msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg->msg[2] = adap->log_addrs.cec_version;
+ msg->msg[3] = las->all_device_types[la_idx];
/* Write RC Profiles first, then Device Features */
for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
- msg.msg[msg.len++] = features[idx];
+ msg->msg[msg->len++] = features[idx];
if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
- return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
- const struct cec_log_addrs *las = &adap->log_addrs;
- struct cec_msg msg = { };
-
- /* Report Physical Address */
- msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
- cec_msg_report_physical_addr(&msg, adap->phys_addr,
- las->primary_device_type[la_idx]);
- dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
- las->log_addr[la_idx],
- cec_phys_addr_exp(adap->phys_addr));
- return cec_transmit_msg(adap, &msg, false);
}
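
Both cec_fill_msg_report_features() and the reworked configuration loop build the first message byte as (logical address << 4) | 0x0f: initiator in the high nibble, destination in the low nibble, with 0xF meaning broadcast. A one-function C sketch of that framing; the helper name is invented for illustration.

#include <stdio.h>

typedef unsigned char u8;

/* CEC header byte: initiator logical address in bits 7..4,
 * destination logical address in bits 3..0 (0xF = broadcast). */
static u8 cec_header(u8 initiator, u8 destination)
{
	return (u8)((initiator << 4) | (destination & 0x0f));
}

int main(void)
{
	printf("0x%02x\n", cec_header(4, 0x0f));	/* 0x4f: playback 1 -> broadcast */
	printf("0x%02x\n", cec_header(0, 5));		/* 0x05: TV -> audio system */
	return 0;
}
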
/* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
case CEC_MSG_GIVE_FEATURES:
- if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
- return cec_report_features(adap, la_idx);
- return 0;
+ if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+ return cec_feature_abort(adap, msg);
+ cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
default:
/*
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index bc5e8cfe7ca2..8f11d7e45993 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
ETH_ALEN);
skb_pull(h->priv->ule_skb, ETH_ALEN);
+ } else {
+ /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+ eth_zero_addr(dest_addr);
}
/* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
if (!h->priv->ule_bridged) {
skb_push(h->priv->ule_skb, ETH_HLEN);
h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
- if (!h->priv->ule_dbit) {
- /*
- * dest_addr buffer is only valid if
- * h->priv->ule_dbit == 0
- */
- memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
- eth_zero_addr(h->ethh->h_source);
- } else /* zeroize source and dest */
- memset(h->ethh, 0, ETH_ALEN * 2);
-
+ memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+ eth_zero_addr(h->ethh->h_source);
h->ethh->h_proto = htons(h->priv->ule_sndu_type);
}
/* else: skb is in correct state; nothing to do. */
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b31fa6fae009..b979ea148251 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select CRC32
---help---
This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 59872b31f832..f4e92bdfe192 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
* I2C Driver
*/
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
return 0;
}
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
return rval;
}
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume NULL
-
-#endif /* CONFIG_PM */
-
static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
{
struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
if (IS_ERR(sensor->xshutdown))
return PTR_ERR(sensor->xshutdown);
- pm_runtime_enable(&client->dev);
-
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
+ rval = smiapp_power_on(&client->dev);
+ if (rval < 0)
+ return rval;
rval = smiapp_identify_module(sensor);
if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
smiapp_cleanup(sensor);
out_power_off:
- pm_runtime_put(&client->dev);
- pm_runtime_disable(&client->dev);
+ smiapp_power_off(&client->dev);
return rval;
}
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
- pm_runtime_suspend(&client->dev);
pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ smiapp_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3a0fe8cc64e9..48646a7f3fb0 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
- /* Svideo should enable YCrCb output and disable GPCL output
- * For Composite and TV, it should be the reverse
+ /*
+ * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+ * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+ * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+ * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+ * INTREQ/GPCL/VBLK to logic 1.
*/
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
}
if (decoder->input == TVP5150_SVIDEO)
- val = (val & ~0x40) | 0x10;
+ val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
else
- val = (val & ~0x10) | 0x40;
+ val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
tvp5150_write(sd, TVP5150_MISC_CTL, val);
};
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
},{ /* Automatic offset and AGC enabled */
TVP5150_ANAL_CHL_CTL, 0x15
},{ /* Activate YCrCb output 0x9 or 0xd ? */
- TVP5150_MISC_CTL, 0x6f
+ TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+ TVP5150_MISC_CTL_INTREQ_OE |
+ TVP5150_MISC_CTL_YCBCR_OE |
+ TVP5150_MISC_CTL_SYNC_OE |
+ TVP5150_MISC_CTL_VBLANK |
+ TVP5150_MISC_CTL_CLOCK_OE,
},{ /* Activates video std autodetection for all standards */
TVP5150_AUTOSW_MSK, 0x0
},{ /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f = &format->format;
- tvp5150_reset(sd, 0);
-
f->width = decoder->rect.width;
f->height = decoder->rect.height / 2;
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
- int val = 0x09;
-
- /* Output format: 8-bit 4:2:2 YUV with discrete sync */
- if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
- val = 0x0d;
+ int val;
- /* Initializes TVP5150 to its default values */
- /* # set PCLK (27MHz) */
- tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+ /* Enable or disable the video output signals. */
+ val = tvp5150_read(sd, TVP5150_MISC_CTL);
+ if (val < 0)
+ return val;
+
+ val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+ TVP5150_MISC_CTL_CLOCK_OE);
+
+ if (enable) {
+ /*
+ * Enable the YCbCr and clock outputs. In discrete sync mode
+ * (non-BT.656) additionally enable the sync outputs.
+ */
+ val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+ if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+ val |= TVP5150_MISC_CTL_SYNC_OE;
+ }
- if (enable)
- tvp5150_write(sd, TVP5150_MISC_CTL, val);
- else
- tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+ tvp5150_write(sd, TVP5150_MISC_CTL, val);
return 0;
}
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
res = core->hdl.error;
goto err;
}
- v4l2_ctrl_handler_setup(&core->hdl);
/* Default is no cropping */
core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
core->rect.left = 0;
core->rect.width = TVP5150_H_MAX;
+ tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
+
res = v4l2_async_register_subdev(sd);
if (res < 0)
goto err;
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 25a994944918..30a48c28d05a 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -9,6 +9,15 @@
#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
+#define TVP5150_MISC_CTL_GPCL BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
+#define TVP5150_MISC_CTL_HVLK BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
+#define TVP5150_MISC_CTL_VBLANK BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
+
#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
/* Reserved 05h */
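
With the MISC_CTL bits named above, the read-modify-write done in tvp5150_s_stream() is plain mask-and-set arithmetic. The sketch below composes the same values in userspace C, assuming the BIT() positions listed in this header; it is illustrative only.

#include <stdio.h>

#define BIT(n) (1u << (n))

#define MISC_CTL_GPCL		BIT(6)
#define MISC_CTL_INTREQ_OE	BIT(5)
#define MISC_CTL_HVLK		BIT(4)
#define MISC_CTL_YCBCR_OE	BIT(3)
#define MISC_CTL_SYNC_OE	BIT(2)
#define MISC_CTL_VBLANK		BIT(1)
#define MISC_CTL_CLOCK_OE	BIT(0)

int main(void)
{
	unsigned int val = MISC_CTL_GPCL | MISC_CTL_INTREQ_OE |
			   MISC_CTL_YCBCR_OE | MISC_CTL_SYNC_OE |
			   MISC_CTL_VBLANK | MISC_CTL_CLOCK_OE;	/* init value */

	/* s_stream(0): clear the three output-enable bits */
	val &= ~(MISC_CTL_YCBCR_OE | MISC_CTL_SYNC_OE | MISC_CTL_CLOCK_OE);
	printf("disabled: 0x%02x\n", val);	/* 0x62 */

	/* s_stream(1), parallel bus: enable data, clock and syncs */
	val |= MISC_CTL_YCBCR_OE | MISC_CTL_CLOCK_OE | MISC_CTL_SYNC_OE;
	printf("enabled:  0x%02x\n", val);	/* 0x6f */
	return 0;
}
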
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 979634000597..d5c911c09e2b 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
{
free_irq(pci_dev->irq, (void *)cobalt);
-
- if (cobalt->msi_enabled)
- pci_disable_msi(pci_dev);
+ pci_free_irq_vectors(pci_dev);
}
static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
from being generated. */
cobalt_set_interrupt(cobalt, false);
- if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+ if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
cobalt_err("Could not enable MSI\n");
- cobalt->msi_enabled = false;
ret = -EIO;
goto err_release;
}
msi_config_show(cobalt, pci_dev);
- cobalt->msi_enabled = true;
/* Register IRQ */
if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9d9399..00f773ec359a 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
u32 irq_none;
u32 irq_full_fifo;
- bool msi_enabled;
-
/* omnitek dma */
int dma_channels;
int first_fifo_channel;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 07fa08be9e99..d54ebe7e0215 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,14 +97,13 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
-
- unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *buf;
u8 id;
unsigned int rlen;
int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
return -EIO;
}
- mutex_lock(&state->ca_mutex);
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
id = state->c++;
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = cmd;
- state->data[3] = write_len;
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = cmd;
+ buf[3] = write_len;
- memcpy(state->data + 4, data, write_len);
+ memcpy(buf + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
- state->data, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+ buf, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;
- memcpy(data, state->data + 4, read_len);
+ memcpy(data, buf + 4, read_len);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return ret;
}
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *buf;
u8 id;
int ret;
- mutex_lock(&state->ca_mutex);
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = PCTV_CMD_I2C;
- state->data[3] = snd_len + 3;
- state->data[4] = addr << 1;
- state->data[5] = snd_len;
- state->data[6] = rcv_len;
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = PCTV_CMD_I2C;
+ buf[3] = snd_len + 3;
+ buf[4] = addr << 1;
+ buf[5] = snd_len;
+ buf[6] = rcv_len;
- memcpy(state->data + 7, snd_buf, snd_len);
+ memcpy(buf + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
- state->data, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+ buf, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (state->data[5] < snd_len || state->data[6] < rcv_len)
+ if (buf[5] < snd_len || buf[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, state->data + 7, rcv_len);
- mutex_unlock(&state->ca_mutex);
+ memcpy(rcv_buf, buf + 7, rcv_len);
+ kfree(buf);
return rcv_len;
failed:
err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- 7, state->data);
+ 7, buf);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return ret;
}
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 *rx;
+ u8 *b0, *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
- rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
- if (!rx)
+ b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!b0)
return -ENOMEM;
- mutex_lock(&state->ca_mutex);
+ rx = b0 + 5;
+
/* hmm, where should this go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, don't know where to put it */
- state->data[0] = 0xaa;
- state->data[1] = state->c++;
- state->data[2] = PCTV_CMD_RESET;
- state->data[3] = 1;
- state->data[4] = 0;
+ b0[0] = 0xaa;
+ b0[1] = state->c++;
+ b0[2] = PCTV_CMD_RESET;
+ b0[3] = 1;
+ b0[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;
- state->data[1] = state->c++;
- state->data[4] = 1;
+ b0[1] = state->c++;
+ b0[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;
state->initialized = 1;
ret:
- mutex_unlock(&state->ca_mutex);
- kfree(rx);
+ kfree(b0);
return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *b, *rx;
int ret, i;
u8 id;
- mutex_lock(&state->ca_mutex);
+ b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ rx = b + CMD_BUFFER_SIZE;
+
id = state->c++;
/* prepare command header */
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = PCTV_CMD_IR;
- state->data[3] = 0;
+ b[0] = SYNC_BYTE_OUT;
+ b[1] = id;
+ b[2] = PCTV_CMD_IR;
+ b[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, state->data, 4,
- state->data, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
if (ret != 0)
goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
- for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", state->data[i + 3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+ for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", rx[i+3]);
info("\n");
}
- if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
+ if ((rx[3] == 9) && (rx[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, state->data[6], state->data[7]);
+ __func__, rx[6], rx[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
state->last_rc_key = 0;
}
ret:
- mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
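
The pctv452e rework replaces one long-lived buffer embedded in the device state with a buffer allocated per call, so USB transfers never touch shared (or stack) memory and the ca_mutex serialization around it can go away. A minimal userspace C sketch of that allocate-use-free shape with a single error exit; the transfer function here is a stand-in, not the dvb-usb API.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for the USB round trip: pretend the device echoes the header. */
static int fake_usb_rw(unsigned char *buf, size_t wlen, size_t rlen)
{
	(void)wlen; (void)rlen;
	return 0;
}

static int send_cmd(unsigned char cmd, const unsigned char *data, size_t len)
{
	unsigned char *buf;
	int ret;

	buf = malloc(64);			/* per-call buffer, not shared state */
	if (!buf)
		return -1;

	buf[0] = 0x55;				/* sync byte */
	buf[1] = cmd;
	buf[2] = (unsigned char)len;
	memcpy(buf + 3, data, len);

	ret = fake_usb_rw(buf, 3 + len, 64);
	if (ret)
		goto out;			/* single exit frees the buffer */

	ret = (buf[0] == 0x55) ? 0 : -1;	/* sanity-check the reply */
out:
	free(buf);
	return ret;
}

int main(void)
{
	unsigned char payload[2] = { 0x01, 0x02 };

	printf("send_cmd: %d\n", send_cmd(0x31, payload, sizeof(payload)));
	return 0;
}
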
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf9806..76382c858c35 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
struct ms_id_register id_reg;
if (!(*mrq)) {
- memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+ memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
sizeof(struct ms_id_register));
*mrq = &card->current_mrq;
return 0;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 18e05ca7584f..3600c9993a98 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
+ if (!cldev->bus->hbm_f_os_supported)
+ return;
+
ret = mei_cldev_enable(cldev);
if (ret)
return;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0037153c80a6..2d9c5dd06e42 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -450,7 +450,7 @@ bool mei_cldev_enabled(struct mei_cl_device *cldev)
EXPORT_SYMBOL_GPL(mei_cldev_enabled);
/**
- * mei_cldev_enable_device - enable me client device
+ * mei_cldev_enable - enable me client device
* create connection with me client
*
* @cldev: me client device
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 391936c1aa04..b0395601c6ae 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1541,7 +1541,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
if (rets < 0)
- return rets;
+ goto err;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
@@ -1575,11 +1575,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->buf.size, cb->buf_idx);
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
- if (rets) {
- cl->status = rets;
- list_move_tail(&cb->list, &cmpl_list->list);
- return rets;
- }
+ if (rets)
+ goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
@@ -1587,14 +1584,21 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cb->completed = mei_hdr.msg_complete == 1;
if (first_chunk) {
- if (mei_cl_tx_flow_ctrl_creds_reduce(cl))
- return -EIO;
+ if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
+ rets = -EIO;
+ goto err;
+ }
}
if (mei_hdr.msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
+
+err:
+ cl->status = rets;
+ list_move_tail(&cb->list, &cmpl_list->list);
+ return rets;
}
/**
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c6c051b52f55..c6217a4993ad 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
dev->hbm_f_ev_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
dev->hbm_f_fa_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
+ dev->hbm_f_os_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index dd7f15a65eed..25b4a1ba522d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* Fixed Address Client Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
dev->hbm_f_fa_supported = 1;
+
+ /* OS ver message Support */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
+ dev->hbm_f_os_supported = 1;
}
/**
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 9daf3f9aed25..e1e4d47d4d7d 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -76,6 +76,12 @@
#define HBM_MINOR_VERSION_FA 0
#define HBM_MAJOR_VERSION_FA 2
+/*
+ * MEI version with OS ver message support
+ */
+#define HBM_MINOR_VERSION_OS 0
+#define HBM_MAJOR_VERSION_OS 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 699693cd8c59..8dadb98662a9 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_ev_supported : hbm feature event notification
* @hbm_f_fa_supported : hbm feature fixed address client
* @hbm_f_ie_supported : hbm feature immediate reply to enum request
+ * @hbm_f_os_supported : hbm feature support OS ver message
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -487,6 +488,7 @@ struct mei_device {
unsigned int hbm_f_ev_supported:1;
unsigned int hbm_f_fa_supported:1;
unsigned int hbm_f_ie_supported:1;
+ unsigned int hbm_f_os_supported:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index b11c3455b040..e6ea8503f40c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
} while (busy);
- if (host->ops->card_busy && send_status)
- return mmc_switch_status(card);
-
return 0;
}
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
goto out;
- /* Switch to new timing before poll and check switch status. */
- if (timing)
- mmc_set_timing(host, timing);
-
/*If SPI or used HW busy detection above, then we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
- mmc_host_is_spi(host)) {
- if (send_status)
- err = mmc_switch_status(card);
+ mmc_host_is_spi(host))
goto out_tim;
- }
/* Let's try to poll to find out when the command is completed. */
err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ if (err)
+ goto out;
out_tim:
- if (err && timing)
- mmc_set_timing(host, old_timing);
+ /* Switch to new timing before checking switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
+
+ if (send_status) {
+ err = mmc_switch_status(card);
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
+ }
out:
mmc_retune_release(host);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b44306b886cb..73db08558e4d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
if (!slot)
continue;
- if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+ if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
- dw_mci_setup_bus(slot, true);
- }
+
+ /* Force setup bus to guarantee available clock output */
+ dw_mci_setup_bus(slot, true);
}
/* Now that slots are all setup, we can enable card detect */
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b352760c041e..09739352834c 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_request *mrq;
- struct mmc_command *cmd = host->cmd;
+ struct mmc_command *cmd;
u32 irq_en, status, raw_status;
irqreturn_t ret = IRQ_HANDLED;
if (WARN_ON(!host))
return IRQ_NONE;
+ cmd = host->cmd;
+
mrq = host->mrq;
if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
int ret = IRQ_HANDLED;
if (WARN_ON(!mrq))
- ret = IRQ_NONE;
+ return IRQ_NONE;
if (WARN_ON(!cmd))
- ret = IRQ_NONE;
+ return IRQ_NONE;
data = cmd->data;
if (data) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd1ea8c..c8b8ac66ff7e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
ssp->base + HW_SSP_BLOCK_SIZE);
}
- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
- (cmd->opcode == SD_IO_RW_EXTENDED))
+ if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 160f695cc09c..278a5a435ab7 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
list_for_each_entry(child, &device->children, node)
- acpi_device_fix_up_power(child);
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 353a9ddf6b97..9ce5dcb4abd0 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -426,6 +426,7 @@ config MTD_NAND_ORION
config MTD_NAND_OXNAS
tristate "NAND Flash support for Oxford Semiconductor SoC"
+ depends on HAS_IOMEM
help
This enables the NAND flash controller on Oxford Semiconductor SoCs.
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
- tristate "Support for NAND on Lantiq XWAY SoC"
+ bool "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5553a5d9efd1..846a66c1b133 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
init_completion(&host->comp_controller);
host->irq = platform_get_irq(pdev, 0);
- if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ if (host->irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto err_exit3;
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 28c7f474be77..4a5e948c62df 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
if (IS_ERR(nfc->pbus_base))
return PTR_ERR(nfc->pbus_base);
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
+ nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
if (IS_ERR(nfc->chan))
return PTR_ERR(nfc->chan);
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 1f2948c0c458..895101a5e686 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
{ .compatible = "lantiq,nand-xway" },
{},
};
-MODULE_DEVICE_TABLE(of, xway_nand_match);
static struct platform_driver xway_nand_driver = {
.probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
},
};
-module_platform_driver(xway_nand_driver);
-
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_nand_driver);
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index b8c293373ecc..a306de4318d7 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static int ipddp_create(struct ipddp_route *new_rt)
{
- struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+ struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL)
return -ENOMEM;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7be393c96b1a..cf7c18947189 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->base = addr;
+ priv->device = &pdev->dev;
if (!c_can_pci_data->freq) {
dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 680d1ff07a55..6749b1829469 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
struct ti_hecc_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(dev);
+ int err;
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
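The ti_hecc hunks above switch from clk_enable()/clk_disable() to the prepare-aware clk API and propagate failures instead of ignoring them. A minimal sketch of that pattern with the generic clk API follows; the example_* helpers and the device pointer are placeholders, not part of this driver.

#include <linux/clk.h>
#include <linux/device.h>

/* Enable a clock the prepare-aware way and report failures. */
static int example_clk_on(struct device *dev, struct clk *clk)
{
	int err;

	/* clk_prepare_enable() = clk_prepare() + clk_enable() */
	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "clk_prepare_enable() failed: %d\n", err);
	return err;
}

/* Matching teardown: clk_disable_unprepare() undoes both steps. */
static void example_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);
}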
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9ec33b51a0ed..2ce7ae97ac91 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -393,7 +393,7 @@ static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
else
- return mdiobus_read(priv->master_mii_bus, addr, regnum);
+ return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}
static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
@@ -407,7 +407,7 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
else
- mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+ mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
return 0;
}
@@ -982,6 +982,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct device_node *dn = pdev->dev.of_node;
struct b53_platform_data *pdata;
+ struct dsa_switch_ops *ops;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -995,6 +996,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
if (!dev)
return -ENOMEM;
@@ -1014,6 +1019,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
ds = dev->ds;
/* Override the parts that are non-standard wrt. normal b53 devices */
+ memcpy(ops, ds->ops, sizeof(*ops));
+ ds->ops = ops;
ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
ds->ops->setup = bcm_sf2_sw_setup;
ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
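The bcm_sf2 probe hunk above stops patching the shared b53 dsa_switch_ops table in place and instead overrides a private, devm-allocated copy. A generic sketch of that copy-then-override pattern is below; struct example_ops and the helper name are illustrative, not the DSA API.

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_ops {
	int (*setup)(void *priv);
	unsigned int (*get_flags)(void *priv, int port);
};

/* Duplicate a shared ops table so per-device overrides cannot leak
 * into other users of the original (possibly const) table.
 */
static struct example_ops *
example_dup_ops(struct device *dev, const struct example_ops *shared)
{
	struct example_ops *ops;

	ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	memcpy(ops, shared, sizeof(*ops));
	/* ...override individual callbacks on the copy here... */
	return ops;
}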
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 5b7ba25e0065..8a280e7d66bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -891,6 +891,8 @@
#define PCS_V1_WINDOW_SELECT 0x03fc
#define PCS_V2_WINDOW_DEF 0x9060
#define PCS_V2_WINDOW_SELECT 0x9064
+#define PCS_V2_RV_WINDOW_DEF 0x1060
+#define PCS_V2_RV_WINDOW_SELECT 0x1064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index aaf0350076a9..a7d16db5c4b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 155190db682d..1c87cc204075 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
+isr_done:
/* If there is not a separate AN irq, handle it here */
if (pdata->dev_irq == pdata->an_irq)
pdata->phy_if.an_isr(irq, pdata);
@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
pdata->i2c_if.i2c_isr(irq, pdata);
-isr_done:
return IRQ_HANDLED;
}
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
xgbe_napi_enable(pdata, 1);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f65b805..c2730f15bd8b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
+ struct pci_dev *rdev;
unsigned int ma_lo, ma_hi;
unsigned int reg;
int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+ /* Set the PCS indirect addressing definition registers */
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (rdev &&
+ (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else {
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ }
+ pci_dev_put(rdev);
+
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
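The xgbe-pci.c hunk above keys the PCS window register layout off the root-complex device at 00:00.0 and drops the reference with pci_dev_put() afterwards. A condensed sketch of that lookup is below; the 0x15d0 device ID comes from the hunk itself, while the example_* name is hypothetical.

#include <linux/pci.h>

/* True if the root complex at domain 0, bus 0, slot 0.0 is the AMD
 * device the driver special-cases; pci_dev_put() is NULL-safe.
 */
static bool example_is_rv_root_complex(void)
{
	struct pci_dev *rdev;
	bool rv;

	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	rv = rdev && rdev->vendor == PCI_VENDOR_ID_AMD &&
	     rdev->device == 0x15d0;
	pci_dev_put(rdev);
	return rv;
}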
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd05bac..00108815b55e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
unsigned int xpcs_window;
unsigned int xpcs_window_size;
unsigned int xpcs_window_mask;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8f525574d68..7dcc907a449d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
return -ENOMEM;
}
- alx_reinit_rings(alx);
-
return 0;
}
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
if (alx->qnapi[0] && alx->qnapi[0]->rxq)
kfree(alx->qnapi[0]->rxq->bufs);
- if (!alx->descmem.virt)
+ if (alx->descmem.virt)
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
alx_free_rings(alx);
alx_free_napis(alx);
alx_disable_advanced_intr(alx);
+ alx_init_intr(alx, false);
err = alx_alloc_napis(alx);
if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
if (err)
goto out_free_rings;
+ /* must be called after alx_request_irq because the chip stops working
+ * if we copy the DMA addresses in alx_init_ring_ptrs twice when
+ * the MSI-X interrupt request fails
+ */
+ alx_reinit_rings(alx);
+
netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3b14d5144228..c483618b57bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
+ } else {
+ phydev = NULL;
}
/* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, priv->dma_chan_int_mask,
ENETDMAC_IRMASK, priv->tx_chan);
- if (priv->has_phy)
+ if (phydev)
phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- if (priv->has_phy)
+ if (phydev)
phy_disconnect(phydev);
return ret;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 25d1eb4933d0..744ed6ddaf37 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct bcm_sysport_cb *cb;
- struct netdev_queue *txq;
u32 hw_ind;
- txq = netdev_get_tx_queue(ndev, ring->index);
-
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
ring->c_index = c_index;
- if (netif_tx_queue_stopped(txq) && pkts_compl)
- netif_tx_wake_queue(txq);
-
netif_dbg(priv, tx_done, ndev,
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
+ struct netdev_queue *txq;
unsigned int released;
unsigned long flags;
+ txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
spin_lock_irqsave(&ring->lock, flags);
released = __bcm_sysport_tx_reclaim(priv, ring);
+ if (released)
+ netif_tx_wake_queue(txq);
+
spin_unlock_irqrestore(&ring->lock, flags);
return released;
}
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
@@ -1012,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
- /* Insert TSB and checksum infos */
- if (priv->tsb_en) {
- skb = bcm_sysport_insert_tsb(skb, dev);
- if (!skb) {
- ret = NETDEV_TX_OK;
- goto out;
- }
- }
-
/* The Ethernet switch we are interfaced with needs packets to be at
* least 64 bytes (including FCS) otherwise they will be discarded when
* they enter the switch port logic. When Broadcom tags are enabled, we
@@ -1028,13 +1030,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*/
- if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
ret = NETDEV_TX_OK;
goto out;
}
- skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
- ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+ /* Insert TSB and checksum infos */
+ if (priv->tsb_en) {
+ skb = bcm_sysport_insert_tsb(skb, dev);
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ skb_len = skb->len;
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
if (dma_mapping_error(kdev, mapping)) {
@@ -1253,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
- bcm_sysport_tx_reclaim(priv, ring);
+ bcm_sysport_tx_clean(priv, ring);
kfree(ring->cbs);
ring->cbs = NULL;
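The bcmsysport TX path above now pads the frame with skb_put_padto() before inserting the TSB, so the padding is reflected in skb->len by the time the buffer is DMA-mapped. A minimal sketch of that ordering is below; example_pad_for_switch() and the tag_len parameter are illustrative.

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Pad to the switch's minimum frame size before any header insertion.
 * skb_put_padto() extends skb->len (unlike skb_padto()) and frees the
 * skb on failure, so the caller must not touch it when this errors out.
 */
static int example_pad_for_switch(struct sk_buff *skb, unsigned int tag_len)
{
	return skb_put_padto(skb, ETH_ZLEN + tag_len);
}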
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9608cb49a11c..4fcc6a84a087 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1099,7 +1099,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int len, nw_off, tcp_opt_len;
+ int len, nw_off, tcp_opt_len = 0;
if (tcp_ts)
tcp_opt_len = 12;
@@ -5314,17 +5314,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
if ((link_info->support_auto_speeds | diff) !=
link_info->support_auto_speeds) {
/* An advertised speed is no longer supported, so we need to
- * update the advertisement settings. See bnxt_reset() for
- * comments about the rtnl_lock() sequence below.
+ * update the advertisement settings. Caller holds RTNL
+ * so we can modify link settings.
*/
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
link_info->advertising = link_info->support_auto_speeds;
- if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
- (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
bnxt_hwrm_set_link_setting(bp, true, false);
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
}
return 0;
}
@@ -6200,29 +6195,37 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- * If there is a parallel dev_close(), bnxt_close() may be holding
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
* rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
* must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
- int rc;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -6236,16 +6239,6 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
- if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
- &bp->sp_event))
- bnxt_hwrm_phy_qcaps(bp);
-
- rc = bnxt_update_link(bp, true);
- if (rc)
- netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
- rc);
- }
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6266,18 +6259,39 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
+ /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+ * must be the last functions to be called before exiting.
+ */
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc = 0;
+
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ rc = bnxt_update_link(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_get_port_module_status(bp);
+ bnxt_rtnl_unlock_sp(bp);
+ }
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
- bnxt_get_port_module_status(bp);
-
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
- bnxt_hwrm_port_qstats(bp);
-
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
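The bnxt_sp_task rework above funnels the rtnl vs. BNXT_STATE_IN_SP_TASK ordering through lock/unlock helpers: the worker must clear its in-progress bit before taking rtnl, because the close path can hold rtnl while waiting for that bit to clear. A condensed sketch of the same ordering follows; the example_* names and the state word are placeholders.

#include <linux/bitops.h>
#include <linux/rtnetlink.h>

#define EXAMPLE_STATE_IN_SP_TASK	0

/* Drop the in-progress bit before rtnl_lock() so a closer that holds
 * rtnl and waits for the bit cannot deadlock against this worker.
 */
static void example_rtnl_lock_sp(unsigned long *state)
{
	clear_bit(EXAMPLE_STATE_IN_SP_TASK, state);
	rtnl_lock();
}

/* Re-mark the work as in progress before releasing rtnl. */
static void example_rtnl_unlock_sp(unsigned long *state)
{
	set_bit(EXAMPLE_STATE_IN_SP_TASK, state);
	rtnl_unlock();
}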
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 185e9e047aa9..ae42de4fdddf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
+ /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+ tg3_full_lock(tp, 0);
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
+ tg3_full_unlock(tp);
}
/*
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 92be2cd8f817..9906fda76087 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -1,5 +1,5 @@
/**
- * macb_pci.c - Cadence GEM PCI wrapper.
+ * Cadence GEM PCI wrapper.
*
* Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
*
@@ -45,32 +45,27 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct macb_platform_data plat_data;
struct resource res[2];
- /* sanity check */
- if (!id)
- return -EINVAL;
-
/* enable pci device */
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err < 0) {
- dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
- err);
- return -EACCES;
+ dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
+ return err;
}
pci_set_master(pdev);
/* set up resources */
memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
- res[0].start = pdev->resource[0].start;
- res[0].end = pdev->resource[0].end;
+ res[0].start = pci_resource_start(pdev, 0);
+ res[0].end = pci_resource_end(pdev, 0);
res[0].name = PCI_DRIVER_NAME;
res[0].flags = IORESOURCE_MEM;
- res[1].start = pdev->irq;
+ res[1].start = pci_irq_vector(pdev, 0);
res[1].name = PCI_DRIVER_NAME;
res[1].flags = IORESOURCE_IRQ;
- dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
- (void *)(uintptr_t)pci_resource_start(pdev, 0));
+ dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
+ &res[0].start);
/* set up macb platform data */
memset(&plat_data, 0, sizeof(plat_data));
@@ -100,7 +95,7 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
plat_info.num_res = ARRAY_SIZE(res);
plat_info.data = &plat_data;
plat_info.size_data = sizeof(plat_data);
- plat_info.dma_mask = DMA_BIT_MASK(32);
+ plat_info.dma_mask = pdev->dma_mask;
/* register platform device */
plat_dev = platform_device_register_full(&plat_info);
@@ -120,7 +115,6 @@ err_hclk_register:
clk_unregister(plat_data.pclk);
err_pclk_register:
- pci_disable_device(pdev);
return err;
}
@@ -130,7 +124,6 @@ static void macb_remove(struct pci_dev *pdev)
struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
platform_device_unregister(plat_dev);
- pci_disable_device(pdev);
clk_unregister(plat_data->pclk);
clk_unregister(plat_data->hclk);
}
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index bbc8bd16cb97..dcbce6cac63e 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
config LIQUIDIO_VF
tristate "Cavium LiquidIO VF support"
depends on 64BIT && PCI_MSI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapter
based on CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9211c750e064..2f85b64f01fa 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -47,8 +47,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
- int lmac_count;
+ u8 lmac_count;
u8 max_lmac;
+ u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;
- acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
+ SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
- bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+ bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+ bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
- bgx->lmac_count++;
return AE_OK;
}
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index 0f0de5b63622..d04a6c163445 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = sin6_scope_id;
dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!cxgb_our_interface(lldi, get_real_dev,
- ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ if (dst->error ||
+ (!cxgb_our_interface(lldi, get_real_dev,
+ ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
dst_release(dst);
- dst = NULL;
+ return NULL;
}
}
-out:
return dst;
}
EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529a4209..30e855004c57 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7e1633bf5a22..1a7f8ad7b9c6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
/* Check if mac has already been added as part of uc-list */
for (i = 0; i < adapter->uc_macs; i++) {
- if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
- mac)) {
+ if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
/* mac already added, skip addition */
adapter->pmac_id[0] = adapter->pmac_id[i + 1];
return 0;
@@ -319,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
+ /* BE3 VFs without FILTMGMT privilege are not allowed to set their
+ * MAC address
+ */
+ if (BEx_chip(adapter) && be_virtfn(adapter) &&
+ !check_privilege(adapter, BE_PRIV_FILTMGMT))
+ return -EPERM;
+
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -1655,14 +1661,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
{
- if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
- adapter->dev_mac)) {
+ if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
return 0;
}
- return be_cmd_pmac_add(adapter,
- (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
adapter->if_handle,
&adapter->pmac_id[uc_idx + 1], 0);
}
@@ -1698,9 +1702,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
}
if (adapter->update_uc_list) {
- i = 1; /* First slot is claimed by the Primary MAC */
-
/* cache the uc-list in adapter array */
+ i = 0;
netdev_for_each_uc_addr(ha, netdev) {
ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
i++;
@@ -3613,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT))
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
+
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3766,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
- /* For BE3 VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
@@ -5155,7 +5163,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
skb_inner_mac_header(skb) - skb_transport_header(skb) !=
- sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+ !adapter->vxlan_port ||
+ udp_hdr(skb)->dest != adapter->vxlan_port)
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 624ba9058dc4..c9b7ad65e563 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,6 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
/* Enable Congestion State Change Notifications and CS taildrop */
+ memset(&initcgr, 0, sizeof(initcgr));
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
@@ -2291,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
if (!net_dev->phydev) {
netif_err(priv, ifup, net_dev, "init_phy() failed\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto phy_init_failed;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -2314,6 +2316,7 @@ mac_start_failed:
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
fman_port_disable(mac_dev->port[i]);
+phy_init_failed:
dpaa_eth_napi_disable(priv);
return err;
@@ -2420,6 +2423,7 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
}
/* Enable CS TD, but disable Congestion State Change Notifications. */
+ memset(&initcgr, 0, sizeof(initcgr));
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
cs_th = DPAA_INGRESS_CS_THRESHOLD;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a6e7afa878be..c1b671667920 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
}
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a831f947ca8c..309f5c66083c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1601,8 +1601,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a761001308dc..1515abaa5ac9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
PAGE_SIZE,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_drain(buffer_info->page, 0,
- buffer_info->pagecnt_bias);
+ __page_frag_cache_drain(buffer_info->page,
+ buffer_info->pagecnt_bias);
buffer_info->page = NULL;
}
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
+ __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index cbeea915f026..8037426ec50f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3dd87889e67e..1c29c86f8709 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2517,7 +2517,7 @@ static int mtk_remove(struct platform_device *pdev)
}
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7623-eth" },
+ { .compatible = "mediatek,mt2701-eth" },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..6b8635378f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
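The mlx4 cq.c changes above drop the spinlock/refcount dance from the interrupt-path lookups and rely on RCU plus the synchronize_irq() calls in mlx4_cq_free() to keep the CQ alive while a handler uses it. A minimal sketch of that reader side is below; struct example_cq and the tree parameter are placeholders.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

struct example_cq {
	void (*comp)(struct example_cq *cq);
};

/* Lock-free lookup from IRQ context: using the CQ after
 * rcu_read_unlock() is only safe because the free path first removes
 * it from the tree and then synchronizes the IRQ before freeing it.
 */
static void example_cq_completion(struct radix_tree_root *tree, unsigned long cqn)
{
	struct example_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(tree, cqn);
	rcu_read_unlock();

	if (cq)
		cq->comp(cq);
}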
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 015198c14fa8..504461a464c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,13 +245,9 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
- u64 tmp_rounded =
- roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
- roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
- u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : tmp_rounded;
+ u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
- u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+ u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
/* This comes from the reverse of clocksource_khz2mult */
return ilog2(div_u64(max_mul * freq_khz, 1000000));
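The en_clock.c hunk above replaces the roundup_pow_of_two()/is_power_of_2() logic with 1ULL << fls64(x - 1), which rounds x up to the next power of two and leaves exact powers of two unchanged (for 1 < x < 2^63). A tiny illustration of the identity, with a hypothetical helper name:

#include <linux/bitops.h>
#include <linux/types.h>

/* For 1 < x < 2^63: smallest power of two that is >= x.
 * e.g. 0x140000000 -> 0x200000000, 0x100000000 -> 0x100000000.
 */
static u64 example_round_up_pow2(u64 x)
{
	return 1ULL << fls64(x - 1);
}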
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d9c9f86a30df..d5a9372ed84d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- memset(channel, 0, sizeof(*channel));
-
channel->max_rx = MAX_RX_RINGS;
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
int xdp_count;
int err = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
- channel->rx_count > MAX_RX_RINGS ||
- !channel->tx_count || !channel->rx_count)
+ if (!channel->tx_count || !channel->rx_count)
return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bcd955339058..761f8b12399c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
- u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
+ u8 num_tx_rings_p_up = t == TX ?
+ priv->num_tx_rings_p_up : priv->tx_ring_num[t];
for (i = 0; i < priv->tx_ring_num[t]; i++) {
/* Configure cq */
@@ -1747,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
/* Process all completions if exist to prevent
* the queues freezing if they are full
*/
- for (i = 0; i < priv->rx_ring_num; i++)
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
+ local_bh_enable();
+ }
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2276,7 +2280,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (priv->tx_ring_num[TX_XDP] &&
!mlx4_en_check_xdp_mtu(dev, new_mtu))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dev->mtu = new_mtu;
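The en_netdev.c hunk above wraps napi_schedule() in local_bh_disable()/local_bh_enable() because it is called from process context rather than from an interrupt. A minimal sketch of that pattern; the helper name is illustrative.

#include <linux/bottom_half.h>
#include <linux/netdevice.h>

/* Kick NAPI from process context: wrapping the call in
 * local_bh_disable()/local_bh_enable() lets the raised softirq run
 * when bottom halves are re-enabled instead of being left pending.
 */
static void example_kick_napi(struct napi_struct *napi)
{
	local_bh_disable();
	napi_schedule(napi);
	local_bh_enable();
}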
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 3c37e216bbf3..eac527e25ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -445,8 +445,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e6fe25..0509996957d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 2a9dd460a95f..e1f9e7cebf8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
if (!buf)
return -ENOMEM;
+ if (offset_in_page(buf)) {
+ dma_free_coherent(dev, PAGE_SIZE << order,
+ buf, sg_dma_address(mem));
+ return -ENOMEM;
+ }
+
sg_set_buf(mem, buf, PAGE_SIZE << order);
- BUG_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5e7840a7a33b..bffa6f345f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>
+#include <linux/etherdevice.h>
#include <net/devlink.h>
#include <linux/mlx4/device.h>
@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
}
EXPORT_SYMBOL(mlx4_is_slave_active);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
+
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *hca_param)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c548beaaf910..1822382212ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ out:
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
@@ -4164,22 +4165,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
-static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
- struct _rule_hw *eth_header)
-{
- if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
- is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
- struct mlx4_net_trans_rule_hw_eth *eth =
- (struct mlx4_net_trans_rule_hw_eth *)eth_header;
- struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
- bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
- next_rule->rsvd == 0;
-
- if (last_rule)
- ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
- }
-}
-
/*
* In case of missing eth header, append eth header with a MAC address
* assigned to the VF.
@@ -4363,10 +4348,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
- handle_eth_header_mcast_prio(ctrl, rule_header);
-
- if (slave == dev->caps.function)
- goto execute;
+ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -4394,7 +4376,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put_qp;
}
-execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
@@ -4473,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct res_qp *rqp;
struct res_fs_rule *rrule;
u64 mirr_reg_id;
+ int qpn;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4489,10 +4471,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
}
mirr_reg_id = rrule->mirr_rule_id;
kfree(rrule->mirr_mbox);
+ qpn = rrule->qpn;
/* Release the rule form busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
- err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
return err;
@@ -4517,7 +4500,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
if (!err)
atomic_dec(&rqp->ref_count);
out:
- put_res(dev, slave, rrule->qpn, RES_QP);
+ put_res(dev, slave, qpn, RES_QP);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 7f6c225666c1..f0b460f47f29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -723,6 +723,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
int i;
struct ieee_ets ets;
+ if (!MLX5_CAP_GEN(priv->mdev, ets))
+ return;
+
memset(&ets, 0, sizeof(ets));
ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets.ets_cap; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 352462af8d51..5197817e4b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
- NUM_PCIE_COUNTERS +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_tas_stats_desc[i].format);
-
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
- pcie_tas_stats_desc, i);
-
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -560,7 +543,6 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
@@ -571,16 +553,6 @@ static int mlx5e_set_channels(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (ch->rx_count || ch->tx_count) {
- netdev_info(dev, "%s: separate rx/tx count not supported\n",
- __func__);
- return -EINVAL;
- }
- if (count > ncv) {
- netdev_info(dev, "%s: count (%d) > max (%d)\n",
- __func__, count, ncv);
- return -EINVAL;
- }
if (priv->params.num_channels == count)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3691451c728c..d088effd7160 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
}
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
+ mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, dmac_47_16),
fs->m_ext.h_dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbfa38fc72c0..2b7dd315020c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
- struct mlx5_core_dev *mdev = priv->mdev;
- int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
- void *out;
- u32 *in;
-
- in = mlx5_vzalloc(sz);
- if (!in)
- return;
-
- out = pcie_stats->pcie_perf_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
- out = pcie_stats->pcie_tas_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
- kvfree(in);
-}
-
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
mlx5e_update_q_counter(priv);
mlx5e_update_vport_counters(priv);
mlx5e_update_pport_counters(priv);
mlx5e_update_sw_counters(priv);
- mlx5e_update_pcie_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
@@ -3699,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
mlx5e_vxlan_cleanup(priv);
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5_eswitch_unregister_vport_rep(esw, 0);
-
if (priv->xdp_prog)
bpf_prog_put(priv->xdp_prog);
}
@@ -3805,14 +3775,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- udp_tunnel_get_rx_info(netdev);
- rtnl_unlock();
- }
-
mlx5e_enable_async_events(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
@@ -3822,13 +3785,30 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
rep.netdev = netdev;
mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
+
+ if (netdev->reg_state != NETREG_REGISTERED)
+ return;
+
+ /* Device already registered: sync netdev system state */
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
+ udp_tunnel_get_rx_info(netdev);
+ rtnl_unlock();
+ }
+
+ queue_work(priv->wq, &priv->set_rx_mode_work);
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
queue_work(priv->wq, &priv->set_rx_mode_work);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(priv->mdev);
+ mlx5_lag_remove(mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3966,10 +3946,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- if (profile->disable)
- profile->disable(priv);
-
- flush_workqueue(priv->wq);
rtnl_lock();
if (netif_running(netdev))
@@ -3977,6 +3953,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
netif_device_detach(netdev);
rtnl_unlock();
+ if (profile->disable)
+ profile->disable(priv);
+ flush_workqueue(priv->wq);
+
mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
mlx5e_close_drop_rq(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0e2fb3ed1790..06d5e6fecb0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
+ if (unlikely(page_is_pfmemalloc(dma_info->page)))
+ return false;
+
cache->page_cache[cache->tail] = *dma_info;
cache->tail = tail_next;
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 1fffe48a93cc..cbfac06b7ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
return true;
case MLX5E_AM_GOING_RIGHT:
return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
break;
case MLX5E_AM_GOING_RIGHT:
am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
break;
case MLX5E_AM_GOING_RIGHT:
if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
u32 delta_us = ktime_us_delta(end->time, start->time);
unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
- if (!delta_us) {
- WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+ if (!delta_us)
return;
- }
curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f202f872f57f..ba5db1dd23a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
-#define PCIE_PERF_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
- MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
- counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
- MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
- counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
- __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
- __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
- { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
- { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
- { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
- { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc)
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
NUM_PPORT_2819_COUNTERS + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
@@ -406,7 +377,6 @@ struct mlx5e_stats {
struct mlx5e_qcounter_stats qcnt;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
- struct mlx5e_pcie_stats pcie;
struct rtnl_link_stats64 vf_vport;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f8829b517156..46bef6a26a8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
}
}
+/* we get here also when setting rule to the FW failed, etc. It means that the
+ * flow rule itself might not exist, but some offloading related to the actions
+ * should be cleaned.
+ */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(flow->rule);
-
- mlx5_del_flow_rules(flow->rule);
+ if (!IS_ERR(flow->rule)) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
if (esw && esw->mode == SRIOV_OFFLOADS) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5e_detach_encap(priv, flow);
}
- mlx5_fc_destroy(priv->mdev, counter);
-
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
+ struct flow_dissector_key_control *enc_control =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ f->key);
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
- return -EOPNOTSUPP;
-
- /* udp src port isn't supported */
- if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
- return -EOPNOTSUPP;
+ goto vxlan_match_offload_err;
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
- else
+ else {
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_dport, ntohs(mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_dport, ntohs(key->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(key->src));
} else { /* udp dst port must be given */
- return -EOPNOTSUPP;
+vxlan_match_offload_err:
+ netdev_warn(priv->netdev,
+ "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+ return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
ntohl(key->dst));
- }
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ }
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (parse_tunnel_attr(priv, spec, f))
return -EOPNOTSUPP;
break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ netdev_warn(priv->netdev,
+ "IPv6 tunnel decap offload isn't supported\n");
default:
return -EOPNOTSUPP;
}
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
key->flags & FLOW_DIS_IS_FRAGMENT);
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (key->flags & FLOW_DIS_IS_FRAGMENT)
+ *min_inline = MLX5_INLINE_MODE_IP;
}
}
@@ -646,18 +668,18 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int ttl;
#if IS_ENABLED(CONFIG_INET)
+ int ret;
+
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- if (IS_ERR(rt)) {
- pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
- return -EOPNOTSUPP;
- }
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
#else
return -EOPNOTSUPP;
#endif
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
- pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
- __func__);
+ pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
ip_rt_put(rt);
return -EOPNOTSUPP;
}
@@ -718,8 +740,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct neighbour *n = NULL;
struct flowi4 fl4 = {};
- struct neighbour *n;
char *encap_header;
int encap_size;
__be32 saddr;
@@ -750,7 +772,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
e->out_dev = *out_dev;
if (!(n->nud_state & NUD_VALID)) {
- err = -ENOTSUPP;
+ pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+ err = -EOPNOTSUPP;
goto out;
}
@@ -772,6 +795,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
encap_size, encap_header, &e->encap_id);
out:
+ if (err && n)
+ neigh_release(n);
kfree(encap_header);
return err;
}
@@ -792,9 +817,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
int tunnel_type;
int err;
- /* udp dst port must be given */
+ /* udp dst port must be set */
if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+ goto vxlan_encap_offload_err;
+
+ /* setting udp src port isn't supported */
+ if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+ netdev_warn(priv->netdev,
+ "must set udp dst port and not set udp src port\n");
return -EOPNOTSUPP;
+ }
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +835,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
info.tun_id = tunnel_id_to_key32(key->tun_id);
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
}
@@ -809,6 +844,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
case AF_INET:
info.daddr = key->u.ipv4.dst;
break;
+ case AF_INET6:
+ netdev_warn(priv->netdev,
+ "IPv6 tunnel encap offload isn't supported\n");
default:
return -EOPNOTSUPP;
}
@@ -986,7 +1024,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_free;
+ goto err_del_rule;
}
err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1035,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto out;
err_del_rule:
- mlx5_del_flow_rules(flow->rule);
+ mlx5e_tc_del_flow(priv, flow);
err_free:
kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d6807c3cc461..f14d9c9ba773 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
+ if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;
mutex_lock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 466e161010f7..03293ed1cc22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -695,6 +695,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto err_reps;
}
+
+ /* disable PF RoCE so missed packets don't go through RoCE steering */
+ mlx5_dev_list_lock();
+ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
return 0;
err_reps:
@@ -718,6 +724,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a263d8904a4c..0ac7a2fc916c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
handle = add_rule_fte(fte, fg, dest, dest_num, false);
if (IS_ERR(handle)) {
+ unlock_ref_node(&fte->node);
kfree(fte);
goto unlock_fg;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 54e5a786f191..d01e9f21d469 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
to_fw_pkey_sz(dev, 128));
+ /* Check log_max_qp from HCA caps to set in current profile */
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+ profile[prof_sel].log_max_qp,
+ MLX5_CAP_GEN_MAX(dev, log_max_qp));
+ profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ }
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
prof->log_max_qp);
@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
return -ENOMEM;
}
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
@@ -1189,6 +1195,9 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
{
int err = 0;
+ if (cleanup)
+ mlx5_drain_health_wq(dev);
+
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
@@ -1351,7 +1360,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- /* In case of kernel call save the pci state and drain health wq */
+ /* In case of kernel call save the pci state and drain the health wq */
if (state) {
pci_save_state(pdev);
mlx5_drain_health_wq(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index d147ddd97997..0af3338bfcb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
/* pci_eqe_cmd_token
* Command completion event - token
*/
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
/* pci_eqe_cmd_status
* Command completion event - status
*/
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
/* pci_eqe_cmd_out_param_h
* Command completion event - output parameter - higher part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
/* pci_eqe_cmd_out_param_l
* Command completion event - output parameter - lower part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
#endif
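
These offsets place the command-completion fields where the hardware actually writes them inside the event-queue element. A hedged illustration of how such (byte offset, shift, width) triplets are decoded, based on my reading of the MLXSW_ITEM32 convention (32-bit big-endian words, the field occupying width bits starting at bit shift), not a verbatim copy of it:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() */

static uint32_t eqe_get_field(const uint8_t *eqe, unsigned int byte_off,
			      unsigned int shift, unsigned int width)
{
	uint32_t word;
	uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);

	memcpy(&word, eqe + byte_off, sizeof(word));	/* big-endian word */
	return (ntohl(word) >> shift) & mask;
}

int main(void)
{
	/* after the fix, cmd_token is bits [31:16] of the word at byte 0x00 */
	uint8_t eqe[0x10] = { 0x12, 0x34, 0x00, 0x07 };

	return eqe_get_field(eqe, 0x00, 16, 16) == 0x1234 &&
	       eqe_get_field(eqe, 0x00, 0, 8) == 0x07 ? 0 : 1;
}
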
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d768c7b6c6d6..003093abb170 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
if (eth_skb_pad(skb)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 01d0efa9c5c7..9e494a446b7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp)
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ bool reallocate)
{
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
continue;
}
- if (nh->update) {
+ if (nh->update || reallocate) {
err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
adj_index, nh);
if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+ false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 150ccf5192a9..2e88115e8735 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 8e5cb7605b0f..873ce2cd76ba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -297,7 +297,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
+ if (p_ll2_conn->conn.gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->
my_id,
@@ -378,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
spin_unlock_irqrestore(&p_tx->lock, flags);
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
+ if (p_ll2_conn->conn.gsi_enable)
qed_ll2b_complete_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
@@ -550,7 +550,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -738,7 +738,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
l4_hdr_offset_w,
- p_ll2_conn->tx_dest, 0,
+ p_ll2_conn->conn.tx_dest, 0,
first_frag,
p_buffer->packet_length,
p_buffer, true);
@@ -858,7 +858,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
u16 buf_idx;
int rc = 0;
- if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc;
if (!rx_num_ooo_buffers)
@@ -901,7 +901,7 @@ static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -913,7 +913,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
{
struct qed_ooo_buffer *p_buffer;
- if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -945,23 +945,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_info *ll2_info;
+ struct qed_ll2_conn ll2_info;
int rc;
- ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
- if (!ll2_info)
- return -ENOMEM;
- ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
- ll2_info->mtu = params->mtu;
- ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info->tx_tc = OOO_LB_TC;
- ll2_info->tx_dest = CORE_TX_DEST_LB;
-
- rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+ ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = OOO_LB_TC;
+ ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+ rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
handle);
- kfree(ll2_info);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
@@ -1006,7 +1002,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1032,7 +1028,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
DMA_REGPAIR_LE(p_ramrod->bd_base,
p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1040,8 +1036,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
- p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
: 1;
@@ -1056,14 +1052,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
}
p_ramrod->action_on_error.error_type = action_on_error;
- p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1075,7 +1071,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1096,7 +1092,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@ -1106,7 +1102,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_params.core.tc = p_ll2_conn->conn.tx_tc;
pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
@@ -1123,7 +1119,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
}
- p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1224,7 +1220,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn_type, rx_num_desc);
+ p_ll2_info->conn.conn_type, rx_num_desc);
out:
return rc;
@@ -1262,7 +1258,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn_type, tx_num_desc);
+ p_ll2_info->conn.conn_type, tx_num_desc);
out:
if (rc)
@@ -1273,7 +1269,7 @@ out:
}
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_params,
+ struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle)
@@ -1302,15 +1298,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (!p_ll2_info)
return -EBUSY;
- p_ll2_info->conn_type = p_params->conn_type;
- p_ll2_info->mtu = p_params->mtu;
- p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
- p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
- p_ll2_info->tx_tc = p_params->tx_tc;
- p_ll2_info->tx_dest = p_params->tx_dest;
- p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
- p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
- p_ll2_info->gsi_enable = p_params->gsi_enable;
+ p_ll2_info->conn = *p_params;
rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
if (rc)
@@ -1371,9 +1359,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
SET_FIELD(action_on_error,
CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
- p_ll2_conn->ai_err_packet_too_big);
+ p_ll2_conn->conn.ai_err_packet_too_big);
SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
@@ -1600,7 +1588,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->queue_id,
p_ll2->cid,
- p_ll2->conn_type,
+ p_ll2->conn.conn_type,
prod_idx,
first_frag_len,
num_of_bds,
@@ -1676,7 +1664,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
- p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+ p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1817,7 +1805,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
return rc;
@@ -1993,7 +1981,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
- struct qed_ll2_info ll2_info;
+ struct qed_ll2_conn ll2_info;
struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
@@ -2041,6 +2029,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
/* Prepare the temporary ll2 information */
memset(&ll2_info, 0, sizeof(ll2_info));
+
ll2_info.conn_type = conn_type;
ll2_info.mtu = params->mtu;
ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2120,7 +2109,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
return 0;
release_terminate_all:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 6625a3ae5a33..31417928b635 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -112,15 +112,8 @@ struct qed_ll2_tx_queue {
bool b_completing_packet;
};
-struct qed_ll2_info {
- /* Lock protecting the state of LL2 */
- struct mutex mutex;
+struct qed_ll2_conn {
enum qed_ll2_conn_type conn_type;
- u32 cid;
- u8 my_id;
- u8 queue_id;
- u8 tx_stats_id;
- bool b_active;
u16 mtu;
u8 rx_drop_ttl0_flg;
u8 rx_vlan_removal_en;
@@ -128,10 +121,21 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
enum core_error_handle ai_err_packet_too_big;
enum core_error_handle ai_err_no_buf;
+ u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ struct qed_ll2_conn conn;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- u8 gsi_enable;
};
/**
@@ -149,7 +153,7 @@ struct qed_ll2_info {
* @return 0 on success, failure otherwise
*/
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_params,
+ struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2a16547c8966..2dbdb3298991 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2;
- struct qed_ll2_info ll2_params;
+ struct qed_ll2_conn ll2_params;
int rc;
if (!params) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 99a14df28b96..2851b4c56570 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
else
adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+ /* of_phy_find_device() claims a reference to the phydev,
+ * so we do that here manually as well. When the driver
+ * later unloads, it can unilaterally drop the reference
+ * without worrying about ACPI vs DT.
+ */
+ if (adpt->phydev)
+ get_device(&adpt->phydev->mdio.dev);
} else {
struct device_node *phy_np;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 422289c232bc..f46d300bd585 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
err_undo_napi:
netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
emac_clks_teardown(adpt);
@@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
emac_clks_teardown(adpt);
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f9b97f5946f8..8f1623bf2134 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -326,6 +326,7 @@ enum cfg_version {
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
@@ -695,7 +696,7 @@ enum rtl_tx_desc_bit_1 {
enum rtl_rx_desc_bit {
/* Rx private */
PID1 = (1 << 18), /* Protocol ID bit 1/2 */
- PID0 = (1 << 17), /* Protocol ID bit 2/2 */
+ PID0 = (1 << 17), /* Protocol ID bit 0/2 */
#define RxProtoUDP (PID1)
#define RxProtoTCP (PID0)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 92d7692c840d..301f48755093 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ bool txed;
+
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ txed = desc->die_dt == DT_FEMPTY;
+ if (free_txed_only && !txed)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ if (txed)
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ if (txed)
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
- /* Free TX skb ringbuffer */
- if (priv->tx_skb[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- dev_kfree_skb(priv->tx_skb[q][i]);
- }
- kfree(priv->tx_skb[q]);
- priv->tx_skb[q] = NULL;
-
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
}
if (priv->tx_ring[q]) {
+ ravb_tx_free(ndev, q, false);
+
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
+
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by ravb_tx_free() call above.
+ */
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
return 0;
}
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &priv->stats[q];
- struct ravb_tx_desc *desc;
- int free_num = 0;
- int entry;
- u32 size;
-
- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
- desc = &priv->tx_ring[q][entry];
- if (desc->die_dt != DT_FEMPTY)
- break;
- /* Descriptor type must be checked before all other reads */
- dma_rmb();
- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
- /* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- size, DMA_TO_DEVICE);
- /* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
- stats->tx_packets++;
- }
- free_num++;
- }
- stats->tx_bytes += size;
- desc->die_dt = DT_EEMPTY;
- }
- return free_num;
-}
-
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
- ravb_tx_free(ndev, q);
+ ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -926,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
- }
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
- }
out:
return budget - quota;
}
@@ -1508,6 +1519,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ /* Zero length DMA descriptors are problematic as they seem to
+ * terminate DMA transfers. Avoid them by simply using a length of
+ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+ * data by the call to skb_put_padto() above this is safe with
+ * respect to both the length of the first DMA descriptor (len)
+ * overflowing the available data and the length of the second DMA
+ * descriptor (skb->len - len) being negative.
+ */
+ if (len == 0)
+ len = DPTR_ALIGN;
+
memcpy(buffer, skb->data, len);
dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
@@ -1558,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ !ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
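
As the comment in the xmit hunk explains, a zero-length first DMA descriptor would be treated as end-of-transfer, so the driver uses DPTR_ALIGN bytes instead whenever skb->data happens to be aligned already. A self-contained sketch of that length calculation, assuming DPTR_ALIGN is 4 as in the driver:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DPTR_ALIGN	4
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static size_t first_desc_len(uintptr_t data)
{
	size_t len = ALIGN_UP(data, DPTR_ALIGN) - data;

	if (len == 0)		/* avoid a zero-length DMA descriptor */
		len = DPTR_ALIGN;
	return len;
}

int main(void)
{
	/* aligned data gets DPTR_ALIGN bytes, unaligned data gets 3 */
	printf("%zu %zu\n", first_desc_len(0x1000), first_desc_len(0x1001));
	return 0;
}

Because the skb was already padded to at least ETH_ZLEN, both the first descriptor (len) and the second (skb->len - len) stay within the available data, as the driver comment notes.
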
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f341c1bc7001..f729a6b43958 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .hw_crc = 1,
.tsu = 1,
.select_mii = 1,
.shift_rd0 = 1,
@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -819,6 +820,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.tsu = 1,
.hw_crc = 1,
.select_mii = 1,
+ .shift_rd0 = 1,
};
/* SH7763 */
@@ -831,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -1656,7 +1658,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
else
goto out;
- if (!likely(mdp->irq_enabled)) {
+ if (unlikely(!mdp->irq_enabled)) {
sh_eth_write(ndev, 0, EESIPR);
goto out;
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index de2947ccc5ad..5eb0e684fd76 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
}
/* don't fail init if RSS setup doesn't work */
- efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+ rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+ efx->rss_active = (rc == 0);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 87bdc56b4e3a..18ebaea44e82 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -975,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXFH: {
info->data = 0;
+ if (!efx->rss_active) /* No RSS */
+ return 0;
switch (info->flow_type) {
case UDP_V4_FLOW:
if (efx->rx_hash_udp_4tuple)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 1a635ced62d0..1c62c1a00fca 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -860,6 +860,7 @@ struct vfdi_status;
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
+ * @rss_active: RSS enabled on hardware
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -998,6 +999,7 @@ struct efx_nic {
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
+ bool rss_active;
bool rx_hash_udp_4tuple;
unsigned int_error_count;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a3901bc96586..4e54e5dc9fcb 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+ efx->rss_active = true;
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index c35597586121..3dc7d279f805 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
struct regmap *regmap;
};
-static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct oxnas_dwmac *dwmac = priv;
unsigned int value;
int ret;
@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
return 0;
}
+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
static int oxnas_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct device_node *sysctrl;
struct oxnas_dwmac *dwmac;
int ret;
- sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
- if (!sysctrl) {
- dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
- return -EINVAL;
- }
-
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
return ret;
@@ -128,72 +129,48 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return -ENOMEM;
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
+ plat_dat->init = oxnas_dwmac_init;
+ plat_dat->exit = oxnas_dwmac_exit;
- dwmac->regmap = syscon_node_to_regmap(sysctrl);
+ dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "oxsemi,sys-ctrl");
if (IS_ERR(dwmac->regmap)) {
dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
- return PTR_ERR(dwmac->regmap);
+ ret = PTR_ERR(dwmac->regmap);
+ goto err_remove_config_dt;
}
dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
- if (IS_ERR(dwmac->clk))
- return PTR_ERR(dwmac->clk);
+ if (IS_ERR(dwmac->clk)) {
+ ret = PTR_ERR(dwmac->clk);
+ goto err_remove_config_dt;
+ }
- ret = oxnas_dwmac_init(dwmac);
+ ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- clk_disable_unprepare(dwmac->clk);
+ goto err_dwmac_exit;
- return ret;
-}
-static int oxnas_dwmac_remove(struct platform_device *pdev)
-{
- struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- int ret = stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int oxnas_dwmac_suspend(struct device *dev)
-{
- struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static int oxnas_dwmac_resume(struct device *dev)
-{
- struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret;
-
- ret = oxnas_dwmac_init(dwmac);
- if (ret)
- return ret;
+ return 0;
- ret = stmmac_resume(dev);
+err_dwmac_exit:
+ oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
- oxnas_dwmac_suspend, oxnas_dwmac_resume);
static const struct of_device_id oxnas_dwmac_match[] = {
{ .compatible = "oxsemi,ox820-dwmac" },
@@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
static struct platform_driver oxnas_dwmac_driver = {
.probe = oxnas_dwmac_probe,
- .remove = oxnas_dwmac_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "oxnas-dwmac",
- .pm = &oxnas_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = oxnas_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bb40382e205d..e3f6389e1b01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3319,8 +3319,16 @@ int stmmac_dvr_probe(struct device *device,
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
- if (priv->plat->maxmtu < ndev->max_mtu)
+ /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
+	 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
+ */
+ if ((priv->plat->maxmtu < ndev->max_mtu) &&
+ (priv->plat->maxmtu >= ndev->min_mtu))
ndev->max_mtu = priv->plat->maxmtu;
+ else if (priv->plat->maxmtu < ndev->min_mtu)
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
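
The added check only honours plat->maxmtu when it falls inside the [min_mtu, max_mtu] window advertised by the netdev and warns when it is below min_mtu. A stand-alone sketch of that validation, with illustrative names rather than the driver's:

#include <stdio.h>

static int pick_max_mtu(int dflt_max, int min_mtu, int plat_maxmtu)
{
	if (plat_maxmtu < dflt_max && plat_maxmtu >= min_mtu)
		return plat_maxmtu;		/* valid platform override */
	if (plat_maxmtu < min_mtu)
		fprintf(stderr, "maxmtu %d is invalid, keeping %d\n",
			plat_maxmtu, dflt_max);
	return dflt_max;			/* keep the driver default */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_max_mtu(9000, 68, 1500),	/* -> 1500 */
	       pick_max_mtu(9000, 68, 16000),	/* -> 9000 */
	       pick_max_mtu(9000, 68, 0));	/* -> 9000, after a warning */
	return 0;
}
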
@@ -3332,20 +3340,14 @@ int stmmac_dvr_probe(struct device *device,
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
- netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
- ret = register_netdev(ndev);
- if (ret) {
- netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
- __func__, ret);
- goto error_netdev_register;
- }
-
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -3365,18 +3367,28 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- netdev_err(priv->dev,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err(priv->device,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
- return 0;
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
+ goto error_netdev_register;
+ }
+
+ return ret;
-error_mdio_register:
- unregister_netdev(ndev);
error_netdev_register:
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+error_mdio_register:
netif_napi_del(&priv->napi);
error_hw_init:
clk_disable_unprepare(priv->pclk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fda01f770eff..b0344c213752 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 value = MII_WRITE | MII_BUSY;
+ u32 value = MII_BUSY;
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
& priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4)
value |= MII_GMAC4_WRITE;
+ else
+ value |= MII_WRITE;
/* Wait until any existing MII operation is complete */
if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index a2831773431a..3da4737620cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -89,6 +89,9 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
}
static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -126,6 +129,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 082cd48db6a7..36942f5a6a53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -351,6 +351,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 77c88fcf2b86..9b8a30bf939b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
goto fail_alloc;
}
-#warning FIXME: unhardcode gpio&reset bits
+ /* FIXME: unhardcode gpio&reset bits */
ar7_gpio_disable(26);
ar7_gpio_disable(27);
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8b6810bad54b..99d3df788ce8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
struct socket *sock0;
struct socket *sock1u;
- struct net *net;
struct net_device *dev;
unsigned int hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+ xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
pktinfo.fl4.saddr, pktinfo.fl4.daddr,
pktinfo.iph->tos,
ip4_dst_hoplimit(&pktinfo.rt->dst),
- htons(IP_DF),
+ 0,
pktinfo.gtph_port, pktinfo.gtph_port,
true, false);
break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1, struct net *src_net);
+ int fd_gtp0, int fd_gtp1);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
- err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+ err = gtp_encap_enable(dev, gtp, fd0, fd1);
if (err < 0)
goto out_err;
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
}
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1, struct net *src_net)
+ int fd_gtp0, int fd_gtp1)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
gtp->sock0 = sock0;
gtp->sock1u = sock1u;
- gtp->net = src_net;
tuncfg.sk_user_data = gtp;
tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9414c054852..fcab8019dda0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* policy filters on the host). Deliver these via the VF
* interface in the guest.
*/
+ rcu_read_lock();
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
if (vf_netdev && (vf_netdev->flags & IFF_UP))
net = vf_netdev;
@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
+ rcu_read_unlock();
return NVSP_STAT_FAIL;
}
@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* TODO - use NAPI?
*/
netif_rx(skb);
+ rcu_read_unlock();
return 0;
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 46d53a6c8cf8..76ba7ecfe142 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
/* Reset */
if (gpio_is_valid(rstn)) {
udelay(1);
- gpio_set_value(rstn, 0);
+ gpio_set_value_cansleep(rstn, 0);
udelay(1);
- gpio_set_value(rstn, 1);
+ gpio_set_value_cansleep(rstn, 1);
usleep_range(120, 240);
}
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1253f864737a..ef688518ad77 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
+ uint8_t *buffer;
uint8_t value;
+ buffer = kmalloc(1, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, &value, 1, 1000);
- return ret >= 0 ? value : ret;
+ 0, reg, buffer, 1, 1000);
+
+ if (ret >= 0) {
+ value = buffer[0];
+ kfree(buffer);
+ return value;
+ } else {
+ kfree(buffer);
+ return ret;
+ }
}
static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@@ -549,13 +562,6 @@ static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct atusb *atusb = hw->priv;
- struct device *dev = &atusb->usb_dev->dev;
-
- if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
- dev_info(dev, "Automatic frame retransmission is only available from "
- "firmware version 0.3. Please update if you want this feature.");
- return -EINVAL;
- }
return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
}
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[3];
+ unsigned char *buffer;
int ret;
+ buffer = kmalloc(3, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
+ kfree(buffer);
return ret;
}
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- char build[ATUSB_BUILD_SIZE + 1];
+ char *build;
int ret;
+ build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
+ if (!build)
+ return -ENOMEM;
+
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
+ kfree(build);
return ret;
}
@@ -698,7 +714,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
+ unsigned char *buffer;
__le64 extended_addr;
u64 addr;
int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
+ buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Firmware is new enough so we fetch the address from EEPROM */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
- if (ret < 0)
- dev_err(&usb_dev->dev, "failed to fetch extended address\n");
+ if (ret < 0) {
+ dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
+ ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+ kfree(buffer);
+ return ret;
+ }
memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
/* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
+ kfree(buffer);
return ret;
}
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
- IEEE802154_HW_FRAME_RETRIES;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
atusb_get_and_show_build(atusb);
atusb_set_extended_addr(atusb);
+ if (atusb->fw_ver_maj > 0 || atusb->fw_ver_min >= 3)
+ hw->flags |= IEEE802154_HW_FRAME_RETRIES;
+
ret = atusb_get_and_clear_error(atusb);
if (ret) {
dev_err(&atusb->usb_dev->dev,
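
The atusb changes above replace on-stack buffers with kmalloc()'d ones because buffers handed to USB control transfers must be DMA-capable, which stack memory is not. A hedged kernel-context sketch of the pattern (helper name and request details are illustrative, not the driver's actual code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* Read one byte from a vendor request into a heap (DMA-able) buffer. */
static int read_one_byte(struct usb_device *udev, u8 request, u16 index)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* never pass stack memory to the USB core */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, index,
			      buf, 1, 1000);
	if (ret >= 0)
		ret = buf[0];		/* propagate the byte on success */

	kfree(buf);
	return ret;
}
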
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 031093e1c25f..dbfbb33ac66c 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -99,6 +99,11 @@ struct ipvl_port {
int count;
};
+struct ipvl_skb_cb {
+ bool tx_pkt;
+};
+#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
+
static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
{
return rcu_dereference(d->rx_handler_data);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b4e990743e1d..83ce74acf82d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
unsigned int mac_hash;
int ret;
u8 pkt_type;
- bool hlocal, dlocal;
+ bool tx_pkt;
__skb_queue_head_init(&list);
@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
spin_unlock_bh(&port->backlog.lock);
while ((skb = __skb_dequeue(&list)) != NULL) {
+ struct net_device *dev = skb->dev;
+ bool consumed = false;
+
ethh = eth_hdr(skb);
- hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+ tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
mac_hash = ipvlan_mac_hash(ethh->h_dest);
if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
else
pkt_type = PACKET_MULTICAST;
- dlocal = false;
rcu_read_lock();
list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
- if (hlocal && (ipvlan->dev == skb->dev)) {
- dlocal = true;
+ if (tx_pkt && (ipvlan->dev == skb->dev))
continue;
- }
if (!test_bit(mac_hash, ipvlan->mac_filters))
continue;
-
+ if (!(ipvlan->dev->flags & IFF_UP))
+ continue;
ret = NET_RX_DROP;
len = skb->len + ETH_HLEN;
nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- goto acct;
-
- nskb->pkt_type = pkt_type;
- nskb->dev = ipvlan->dev;
- if (hlocal)
- ret = dev_forward_skb(ipvlan->dev, nskb);
- else
- ret = netif_rx(nskb);
-acct:
+ local_bh_disable();
+ if (nskb) {
+ consumed = true;
+ nskb->pkt_type = pkt_type;
+ nskb->dev = ipvlan->dev;
+ if (tx_pkt)
+ ret = dev_forward_skb(ipvlan->dev, nskb);
+ else
+ ret = netif_rx(nskb);
+ }
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+ local_bh_enable();
}
rcu_read_unlock();
- if (dlocal) {
+ if (tx_pkt) {
/* If the packet originated here, send it out. */
skb->dev = port->dev;
skb->pkt_type = pkt_type;
dev_queue_xmit(skb);
} else {
- kfree_skb(skb);
+ if (consumed)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
}
+ if (dev)
+ dev_put(dev);
}
}
@@ -470,15 +477,24 @@ out:
}
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool tx_pkt)
{
if (skb->protocol == htons(ETH_P_PAUSE)) {
kfree_skb(skb);
return;
}
+ /* Record whether the deferred packet came from the TX or the RX path;
+ * deciding this by looking at the packet's MAC addresses would lead to
+ * erroneous decisions (e.g. with loopback mode on the master device or
+ * hair-pin mode on the switch).
+ */
+ IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
+
spin_lock(&port->backlog.lock);
if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+ if (skb->dev)
+ dev_hold(skb->dev);
__skb_queue_tail(&port->backlog, skb);
spin_unlock(&port->backlog.lock);
schedule_work(&port->wq);
@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
} else if (is_multicast_ether_addr(eth->h_dest)) {
ipvlan_skb_crossing_ns(skb, NULL);
- ipvlan_multicast_enqueue(ipvlan->port, skb);
+ ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
}
@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
*/
if (nskb) {
ipvlan_skb_crossing_ns(nskb, NULL);
- ipvlan_multicast_enqueue(port, nskb);
+ ipvlan_multicast_enqueue(port, nskb, false);
}
}
} else {
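
The ipvlan rework above records a per-packet TX/RX flag in skb->cb through the IPVL_SKB_CB() cast instead of inferring it later from MAC addresses. A hedged sketch of that control-block pattern in isolation (struct and helper names are illustrative):

#include <linux/skbuff.h>
#include <linux/bug.h>

struct example_skb_cb {
	bool tx_pkt;
};
#define EXAMPLE_SKB_CB(skb) ((struct example_skb_cb *)&((skb)->cb[0]))

static void mark_tx(struct sk_buff *skb, bool tx_pkt)
{
	/* skb->cb is 48 bytes of scratch space owned by the current layer */
	BUILD_BUG_ON(sizeof(struct example_skb_cb) > sizeof(skb->cb));
	EXAMPLE_SKB_CB(skb)->tx_pkt = tx_pkt;
}

static bool is_tx(struct sk_buff *skb)
{
	return EXAMPLE_SKB_CB(skb)->tx_pkt;
}
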
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 693ec5b66222..8b0f99300cbc 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -135,6 +135,7 @@ err:
static void ipvlan_port_destroy(struct net_device *dev)
{
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
+ struct sk_buff *skb;
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
if (port->mode == IPVLAN_MODE_L3S) {
@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
}
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
- __skb_queue_purge(&port->backlog);
+ while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
+ if (skb->dev)
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
kfree(port);
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5c26653eceb5..402618565838 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
return -EINVAL;
if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
- macvtap_is_little_endian(q)))
+ macvtap_is_little_endian(q), true))
BUG();
if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d361835b315d..8dbd59baa34d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -279,6 +279,7 @@ config MARVELL_PHY
config MESON_GXL_PHY
tristate "Amlogic Meson GXL Internal PHY"
+ depends on ARCH_MESON || COMPILE_TEST
---help---
Currently has a driver for the Amlogic Meson GXL Internal PHY
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e741bf614c4e..b0492ef2cdaa 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, MII_BCM63XX_IR);
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg &= ~MII_BCM63XX_IR_GMASK;
+ else
+ reg |= MII_BCM63XX_IR_GMASK;
+
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ return err;
+}
+
static int bcm63xx_config_init(struct phy_device *phydev)
{
int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
- .config_intr = bcm_phy_config_intr,
+ .config_intr = bcm63xx_config_intr,
}, {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
- .config_intr = bcm_phy_config_intr,
+ .config_intr = bcm63xx_config_intr,
} };
module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f06279..a10d0e7fc5f7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
#include <linux/phy.h>
#define TI_DP83848C_PHY_ID 0x20005ca0
+#define TI_DP83620_PHY_ID 0x20005ce0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+ { TI_DP83620_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
{ TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1b639242f9e2..ca1b462bf7b2 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -29,6 +29,7 @@
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CTRL 0x1f
+#define DP83867_CFG3 0x1e
/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
@@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
micr_status |=
(MII_DP83867_MICR_AN_ERR_INT_EN |
MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+ MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
@@ -129,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
return of_property_read_u32(of_node, "ti,fifo-depth",
@@ -214,6 +221,13 @@ static int dp83867_config_init(struct phy_device *phydev)
}
}
+ /* Enable Interrupt output INT_OE in CFG3 register */
+ if (phy_interrupt_is_valid(phydev)) {
+ val = phy_read(phydev, DP83867_CFG3);
+ val |= BIT(7);
+ phy_write(phydev, DP83867_CFG3, val);
+ }
+
return 0;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e269262471a4..ed0d235cf850 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE) {
+ if (phydev->supported & SUPPORTED_FIBRE &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
if (err < 0)
goto error;
@@ -1678,6 +1679,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .get_wol = &m88e1318_get_wol,
+ .set_wol = &m88e1318_set_wol,
.resume = &marvell_resume,
.suspend = &marvell_suspend,
.get_sset_count = marvell_get_sset_count,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9a77289109b7..e55809c5beb7 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = PHY_ID_KSZ8795,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Micrel KSZ8795",
+ .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 25f93a98863b..7cc1b7dcfe05 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
* phy_trigger_machine - trigger the state machine to run
*
* @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
*
* Description: There has been a change in state which requires that the
* state machine runs.
*/
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
- cancel_delayed_work_sync(&phydev->state_queue);
+ if (sync)
+ cancel_delayed_work_sync(&phydev->state_queue);
+ else
+ cancel_delayed_work(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, false);
}
/**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
}
/* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, true);
return;
ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
if (do_resume)
phy_resume(phydev);
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
@@ -1065,6 +1070,15 @@ void phy_state_machine(struct work_struct *work)
if (old_link != phydev->link)
phydev->state = PHY_CHANGELINK;
}
+ /*
+ * Failsafe: check that nobody set phydev->link=0 between two
+ * poll cycles, otherwise we won't leave RUNNING state as long
+ * as link remains down.
+ */
+ if (!phydev->link && phydev->state == PHY_RUNNING) {
+ phydev->state = PHY_CHANGELINK;
+ phydev_err(phydev, "no link in PHY_RUNNING\n");
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
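
phy_trigger_machine() gains a sync flag above because cancel_delayed_work_sync() may sleep and must not be called from a path that can run inside the same work item (it would wait on itself), whereas cancel_delayed_work() is safe there. A hedged generic sketch of that dispatch, not the phylib code itself:

#include <linux/workqueue.h>

/* Re-arm a state-machine work item; only wait for a pending run to
 * finish when the caller is known not to be that work item. */
static void retrigger(struct delayed_work *dw, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(dw);	/* may sleep, never call from dw itself */
	else
		cancel_delayed_work(dw);	/* non-blocking, safe from dw */
	queue_delayed_work(system_power_efficient_wq, dw, 0);
}
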
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index fa62bdf2f526..94ca42e630bb 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -12,6 +12,7 @@
*/
#include <linux/leds.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/netdevice.h>
static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
sizeof(struct phy_led_trigger) *
phy->phy_num_led_triggers,
GFP_KERNEL);
- if (!phy->phy_led_triggers)
- return -ENOMEM;
+ if (!phy->phy_led_triggers) {
+ err = -ENOMEM;
+ goto out_clear;
+ }
for (i = 0; i < phy->phy_num_led_triggers; i++) {
err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+ phy->phy_num_led_triggers = 0;
return err;
}
EXPORT_SYMBOL_GPL(phy_led_triggers_register);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cd8e02c94be0..2cd10b26b650 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1360,7 +1360,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return -EINVAL;
if (virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun))) {
+ tun_is_little_endian(tun), true)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 6c646e228833..6e98ede997d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
.probe = usbnet_probe,
.suspend = asix_suspend,
.resume = asix_resume,
+ .reset_resume = asix_resume,
.disconnect = usbnet_disconnect,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe7b2886cb6b..86144f9a80ee 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
#define NVIDIA_VENDOR_ID 0x0955
+#define HP_VENDOR_ID 0x03f0
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6fe1cdb0174f..24d5272cdce5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HP lt2523 (Novatel E371) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7dc61228c55b..ad42295356dd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "6"
+#define NET_VERSION "8"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
- if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
+ if (!(tp->netdev->features & NETIF_F_RXCSUM))
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
+ else if (!skb_queue_empty(&tp->tx_queue) &&
+ !list_empty(&tp->tx_free))
+ napi_schedule(napi);
}
return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
+ netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
if (!netif_running(netdev))
return 0;
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
- netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
- netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
return 0;
}
@@ -3572,43 +3584,98 @@ static bool delay_autosuspend(struct r8152 *tp)
*/
if (!sw_linking && tp->rtl_ops.in_nway(tp))
return true;
+ else if (!skb_queue_empty(&tp->tx_queue))
+ return true;
else
return false;
}
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_runtime_suspend(struct r8152 *tp)
{
- struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev = tp->netdev;
int ret = 0;
- mutex_lock(&tp->control);
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+
+ if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+ u32 rcr = 0;
- if (PMSG_IS_AUTO(message)) {
- if (netif_running(netdev) && delay_autosuspend(tp)) {
+ if (delay_autosuspend(tp)) {
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
- set_bit(SELECTIVE_SUSPEND, &tp->flags);
- } else {
- netif_device_detach(netdev);
+ if (netif_carrier_ok(netdev)) {
+ u32 ocp_data;
+
+ rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data = rcr & ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+ rxdy_gated_en(tp, true);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+ PLA_OOB_CTRL);
+ if (!(ocp_data & RXFIFO_EMPTY)) {
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ ret = -EBUSY;
+ goto out1;
+ }
+ }
+
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+
+ tp->rtl_ops.autosuspend_en(tp, true);
+
+ if (netif_carrier_ok(netdev)) {
+ napi_disable(&tp->napi);
+ rtl_stop_rx(tp);
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ napi_enable(&tp->napi);
+ }
}
+out1:
+ return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ int ret = 0;
+
+ netif_device_detach(netdev);
+
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
napi_disable(&tp->napi);
- if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_stop_rx(tp);
- tp->rtl_ops.autosuspend_en(tp, true);
- } else {
- cancel_delayed_work_sync(&tp->schedule);
- tp->rtl_ops.down(tp);
- }
+ cancel_delayed_work_sync(&tp->schedule);
+ tp->rtl_ops.down(tp);
napi_enable(&tp->napi);
}
-out1:
+
+ return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ int ret;
+
+ mutex_lock(&tp->control);
+
+ if (PMSG_IS_AUTO(message))
+ ret = rtl8152_runtime_suspend(tp);
+ else
+ ret = rtl8152_system_suspend(tp);
+
mutex_unlock(&tp->control);
return ret;
@@ -3629,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.autosuspend_en(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
} else {
tp->rtl_ops.up(tp);
netif_carrier_off(tp->netdev);
@@ -4308,6 +4378,11 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ if (tp->version == RTL_VER_01) {
+ netdev->features &= ~NETIF_F_RXCSUM;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
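
The r8152 rework above splits rtl8152_suspend() into a runtime (autosuspend) path and a system-suspend path, dispatched on PMSG_IS_AUTO(). A hedged outline of that shape with illustrative names; the real paths carry far more hardware state handling:

#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/usb.h>

struct example_priv { struct mutex control; /* ... */ };

static int example_runtime_suspend(struct example_priv *tp) { return 0; }
static int example_system_suspend(struct example_priv *tp)  { return 0; }

static int example_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct example_priv *tp = usb_get_intfdata(intf);
	int ret;

	mutex_lock(&tp->control);
	/* PMSG_IS_AUTO() is true for runtime (autosuspend) requests */
	if (PMSG_IS_AUTO(message))
		ret = example_runtime_suspend(tp);
	else
		ret = example_system_suspend(tp);
	mutex_unlock(&tp->control);

	return ret;
}
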
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a105006ca63..765c2d6358da 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,8 +48,16 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr = skb_vnet_hdr(skb);
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev)))
+ virtio_is_little_endian(vi->vdev), false))
BUG();
if (vi->mergeable_rx_bufs)
@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
u16 xdp_qp = 0, curr_qp;
int i, err;
+ if (prog && prog->xdp_adjust_head) {
+ netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
+ /* For small receive mode always use kfree_skb variants */
+ if (!vi->mergeable_rx_bufs)
+ return false;
+
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_queue(vi, i))
+ if (!is_xdp_raw_buffer_queue(vi, i))
dev_kfree_skb(buf);
else
put_page(virt_to_head_page(buf));
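
The new MERGEABLE_BUFFER_MIN_ALIGN_SHIFT above is roughly half of PAGE_SHIFT, so the alignment is about the square root of the page size and the freed-up low address bits can encode any buffer size up to one page. A small runnable check of that arithmetic (the printed values assume 4 KiB and 64 KiB pages):

#include <stdio.h>

static unsigned int min_align(unsigned int page_shift)
{
	unsigned int shift = (page_shift + 1) / 2;	/* ceil(PAGE_SHIFT / 2) */
	return 1u << shift;
}

int main(void)
{
	/* 4 KiB pages: align to 64, leaving 6 low bits to encode sizes up to
	 * 4096 in 64-byte steps; 64 KiB pages: align to 256. */
	printf("PAGE_SHIFT=12 -> %u\n", min_align(12));	/* 64 */
	printf("PAGE_SHIFT=16 -> %u\n", min_align(16));	/* 256 */
	return 0;
}
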
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7532646c3b7b..454f907d419a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
};
struct net *net = dev_net(vrf_dev);
struct rtable *rt;
@@ -967,6 +969,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
*/
need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
if (!ipv6_ndisc_frame(skb) && !need_strict) {
+ vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1011,6 +1014,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}
+ vrf_rx_stats(vrf_dev, skb->len);
+
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
skb_pull(skb, skb->mac_len);
@@ -1247,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+ if (vrf->tb_id == RT_TABLE_UNSPEC)
+ return -EINVAL;
dev->priv_flags |= IFF_L3MDEV_MASTER;
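
The vrf change above adds the protocol and source address to the flowi4 key so the FIB lookup can honour routes and rules that depend on them. A hedged kernel-context sketch of building such a key for an output route lookup (not the vrf code itself):

#include <linux/ip.h>
#include <net/route.h>

/* Look up an output route for an already-built IPv4 header. */
static struct rtable *route_for_header(struct net *net, const struct iphdr *iph)
{
	struct flowi4 fl4 = {
		.flowi4_tos   = RT_TOS(iph->tos),
		.flowi4_proto = iph->protocol,	/* lets protocol-based rules match */
		.daddr        = iph->daddr,
		.saddr        = iph->saddr,	/* source-based routing/rules */
	};

	return ip_route_output_key(net, &fl4);	/* ERR_PTR() on failure */
}
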
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bb70dd5723b5..50b62db213b0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
- __be32 daddr, __be32 *saddr,
+ __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = *saddr;
+ fl4.fl4_dport = dport;
+ fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
__be32 label,
const struct in6_addr *daddr,
struct in6_addr *saddr,
+ __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_dport = dport;
+ fl6.fl6_sport = sport;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
@@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
@@ -2261,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2347,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
}
/* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
unsigned int h;
@@ -2357,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
+ if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+ continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f);
@@ -2378,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
del_timer_sync(&vxlan->age_timer);
- vxlan_flush(vxlan);
+ vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
return ret;
@@ -2430,7 +2439,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
- &info->key.u.ipv4.src, NULL, info);
+ &info->key.u.ipv4.src, dport, sport, NULL, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@@ -2441,7 +2450,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
- &info->key.u.ipv6.src, NULL, info);
+ &info->key.u.ipv6.src, dport, sport, NULL, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
@@ -2883,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
memcpy(&vxlan->cfg, conf, sizeof(*conf));
if (!vxlan->cfg.dst_port) {
if (conf->flags & VXLAN_F_GPE)
- vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+ vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
else
vxlan->cfg.dst_port = default_port;
}
@@ -3051,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ vxlan_flush(vxlan, true);
+
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
hlist_del_rcu(&vxlan->hlist);
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index b776a0ab106c..9d9b4e0def2a 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -218,7 +218,7 @@ static int slic_ds26522_probe(struct spi_device *spi)
ret = slic_ds26522_init_configure(spi);
if (ret == 0)
- pr_info("DS26522 cs%d configurated\n", spi->chip_select);
+ pr_info("DS26522 cs%d configured\n", spi->chip_select);
return ret;
}
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index bc7397d709d3..08bc7822f820 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -16,7 +16,7 @@
/********************************************************************/
int orinoco_mic_init(struct orinoco_private *priv)
{
- priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+ priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
return -ENOMEM;
}
- priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+ priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
void orinoco_mic_free(struct orinoco_private *priv)
{
if (priv->tx_tfm_mic)
- crypto_free_ahash(priv->tx_tfm_mic);
+ crypto_free_shash(priv->tx_tfm_mic);
if (priv->rx_tfm_mic)
- crypto_free_ahash(priv->rx_tfm_mic);
+ crypto_free_shash(priv->rx_tfm_mic);
}
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
u8 *da, u8 *sa, u8 priority,
u8 *data, size_t data_len, u8 *mic)
{
- AHASH_REQUEST_ON_STACK(req, tfm_michael);
- struct scatterlist sg[2];
+ SHASH_DESC_ON_STACK(desc, tfm_michael);
u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
int err;
@@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
hdr[ETH_ALEN * 2 + 2] = 0;
hdr[ETH_ALEN * 2 + 3] = 0;
- /* Use scatter gather to MIC header and data in one go */
- sg_init_table(sg, 2);
- sg_set_buf(&sg[0], hdr, sizeof(hdr));
- sg_set_buf(&sg[1], data, data_len);
+ desc->tfm = tfm_michael;
+ desc->flags = 0;
- if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
- return -1;
+ err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
+ if (err)
+ return err;
+
+ err = crypto_shash_init(desc);
+ if (err)
+ return err;
+
+ err = crypto_shash_update(desc, hdr, sizeof(hdr));
+ if (err)
+ return err;
+
+ err = crypto_shash_update(desc, data, data_len);
+ if (err)
+ return err;
+
+ err = crypto_shash_final(desc, mic);
+ shash_desc_zero(desc);
- ahash_request_set_tfm(req, tfm_michael);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
- err = crypto_ahash_digest(req);
- ahash_request_zero(req);
return err;
}
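
The orinoco conversion above moves from the ahash API (scatterlists, async requests) to the simpler synchronous shash API. A hedged sketch of the generic shash call sequence for a keyed digest such as michael_mic, with error handling abbreviated; this is not the driver's function:

#include <crypto/hash.h>

static int keyed_digest(struct crypto_shash *tfm, const u8 *key, unsigned int keylen,
			const u8 *hdr, unsigned int hdrlen,
			const u8 *data, unsigned int datalen, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);	/* request state lives on the stack */
	int err;

	err = crypto_shash_setkey(tfm, key, keylen);
	if (err)
		return err;

	desc->tfm = tfm;	/* older kernels also required desc->flags = 0 */
	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, hdr, hdrlen) ?:
	      crypto_shash_update(desc, data, datalen) ?:
	      crypto_shash_final(desc, out);

	shash_desc_zero(desc);	/* wipe key-derived state from the stack */
	return err;
}
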
diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h
index ce731d05cc98..e8724e889219 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.h
+++ b/drivers/net/wireless/intersil/orinoco/mic.h
@@ -6,6 +6,7 @@
#define _ORINOCO_MIC_H_
#include <linux/types.h>
+#include <crypto/hash.h>
#define MICHAEL_MIC_LEN 8
@@ -15,7 +16,7 @@ struct crypto_ahash;
int orinoco_mic_init(struct orinoco_private *priv);
void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
u8 *da, u8 *sa, u8 priority,
u8 *data, size_t data_len, u8 *mic);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 2f0c84b1c440..5fa1c3e3713f 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -152,8 +152,8 @@ struct orinoco_private {
u8 *wpa_ie;
int wpa_ie_len;
- struct crypto_ahash *rx_tfm_mic;
- struct crypto_ahash *tx_tfm_mic;
+ struct crypto_shash *rx_tfm_mic;
+ struct crypto_shash *tx_tfm_mic;
unsigned int wpa_enabled:1;
unsigned int tkip_cm_active:1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 0a508649903d..49015b05f3d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf,
return -ENOMEM;
}
rtlpriv = hw->priv;
+ rtlpriv->hw = hw;
rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
GFP_KERNEL);
if (!rtlpriv->usb_data)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e30ffd29b7e9..579521327b03 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long tx_bytes = 0;
unsigned long tx_packets = 0;
unsigned int index;
+ spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < num_queues; ++index) {
+ for (index = 0; index < vif->num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
}
out:
+ spin_unlock(&vif->lock);
+
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
vif->dev->stats.tx_bytes = tx_bytes;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3124eaec9427..85b742e1c42f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
+ unsigned int queue_index;
+
xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(be->vif);
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+ spin_lock(&be->vif->lock);
+ vfree(be->vif->queues);
+ be->vif->num_queues = 0;
+ be->vif->queues = NULL;
+ spin_unlock(&be->vif->lock);
+
xenvif_disconnect_ctrl(be->vif);
}
}
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
err:
if (be->vif->num_queues > 0)
xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a479cd99911d..8315fe73ecd0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6307088b375f..a518cb1b59d4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
{
resource_size_t allocated = 0, available = 0;
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_mapping *nd_mapping;
struct nvdimm_drvdata *ndd;
struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
u8 *uuid = NULL;
int rc, i;
- if (dev->driver || to_ndns(dev)->claim)
+ if (dev->driver || ndns->claim)
return -EBUSY;
if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
nd_namespace_pmem_set_resource(nd_region, nspm,
val * nd_region->ndr_mappings);
- } else if (is_namespace_blk(dev)) {
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
- /*
- * Try to delete the namespace if we deleted all of its
- * allocation, this is not the seed device for the
- * region, and it is not actively claimed by a btt
- * instance.
- */
- if (val == 0 && nd_region->ns_seed != dev
- && !nsblk->common.claim)
- nd_device_unregister(dev, ND_ASYNC);
}
+ /*
+ * Try to delete the namespace if we deleted all of its
+ * allocation, this is not the seed device for the region, and
+ * it is not actively claimed by a btt instance.
+ */
+ if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+ nd_device_unregister(dev, ND_ASYNC);
+
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7282d7495bf1..5b536be5a12e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
rc = memcpy_from_pmem(mem + off, pmem_addr, len);
kunmap_atomic(mem);
- return rc;
+ if (rc)
+ return -EIO;
+ return 0;
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b40cfb076f02..8a3c3e32a704 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
if (ret)
return ret;
- /* Checking for ctrl->tagset is a trick to avoid sleeping on module
- * load, since we only need the quirk on reset_controller. Notice
- * that the HGST device needs this delay only in firmware activation
- * procedure; unfortunately we have no (easy) way to verify this.
- */
- if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+ if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
return nvme_wait_ready(ctrl, cap, false);
@@ -1193,8 +1188,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if (ctrl->stripe_size)
- blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+ if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+ blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
@@ -1250,19 +1245,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
- unsigned int max_hw_sectors;
-
- ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
- max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
- if (ctrl->max_hw_sectors) {
- ctrl->max_hw_sectors = min(max_hw_sectors,
- ctrl->max_hw_sectors);
- } else {
- ctrl->max_hw_sectors = max_hw_sectors;
- }
- }
-
nvme_set_queue_limits(ctrl, ctrl->admin_q);
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 771e2e761872..e65041c640cb 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1491,19 +1491,20 @@ static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
struct nvme_fc_queue *queue = &ctrl->queues[1];
- int i, j, ret;
+ int i, ret;
for (i = 1; i < ctrl->queue_count; i++, queue++) {
ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
- if (ret) {
- for (j = i-1; j >= 0; j--)
- __nvme_fc_delete_hw_queue(ctrl,
- &ctrl->queues[j], j);
- return ret;
- }
+ if (ret)
+ goto delete_queues;
}
return 0;
+
+delete_queues:
+ for (; i >= 0; i--)
+ __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+ return ret;
}
static int
@@ -1653,23 +1654,22 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
struct nvme_fc_fcp_op *op)
{
struct nvmefc_fcp_req *freq = &op->fcp_req;
- u32 map_len = nvme_map_len(rq);
enum dma_data_direction dir;
int ret;
freq->sg_cnt = 0;
- if (!map_len)
+ if (!blk_rq_payload_bytes(rq))
return 0;
freq->sg_table.sgl = freq->first_sgl;
- ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
- freq->sg_table.sgl);
+ ret = sg_alloc_table_chained(&freq->sg_table,
+ blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
if (ret)
return -ENOMEM;
op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
- WARN_ON(op->nents > rq->nr_phys_segments);
+ WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, dir);
@@ -1853,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- data_len = nvme_map_len(rq);
+ data_len = blk_rq_payload_bytes(rq);
if (data_len)
io_dir = ((rq_data_dir(rq) == WRITE) ?
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
@@ -2401,8 +2401,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
WARN_ON_ONCE(!changed);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
- ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+ "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+ ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
kref_get(&ctrl->ctrl.kref);
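
The nvme-fc hunk above replaces the nested unwind loop with the usual label-based cleanup pattern: on failure, jump to a label that tears down everything created so far by walking the index back down. A hedged, self-contained sketch of that pattern with illustrative names:

#include <stdio.h>

#define N_RES 4

static int create_one(int i)   { return (i == 2) ? -1 : 0; }	/* simulate a failure at index 2 */
static void destroy_one(int i) { printf("destroy %d\n", i); }

/* Create N_RES resources; on failure undo only the ones already created. */
static int create_all(void)
{
	int i, ret;

	for (i = 0; i < N_RES; i++) {
		ret = create_one(i);
		if (ret)
			goto undo;
	}
	return 0;

undo:
	while (--i >= 0)	/* i was the first index that failed */
		destroy_one(i);
	return ret;
}

int main(void) { return create_all() ? 1 : 0; }
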
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bd5321441d12..aead6d08ed2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -135,7 +135,6 @@ struct nvme_ctrl {
u32 page_size;
u32 max_hw_sectors;
- u32 stripe_size;
u16 oncs;
u16 vid;
atomic_t abort_limit;
@@ -226,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline unsigned nvme_map_len(struct request *rq)
-{
- if (req_op(rq) == REQ_OP_DISCARD)
- return sizeof(struct nvme_dsm_range);
- else
- return blk_rq_bytes(rq);
-}
-
static inline void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3d21a154dce7..3faefabf339c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
-static int nvme_init_iod(struct request *rq, unsigned size,
- struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
+ unsigned int size = blk_rq_payload_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
- int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
- int length = total_len;
+ int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
}
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
- unsigned size, struct nvme_command *cmnd)
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- if (!nvme_setup_prps(dev, req, size))
+ if (!nvme_setup_prps(dev, req))
goto out_unmap;
ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
- unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- map_len = nvme_map_len(req);
- ret = nvme_init_iod(req, map_len, dev);
+ ret = nvme_init_iod(req, dev);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_free_cmd;
if (blk_rq_nr_phys_segments(req))
- ret = nvme_map_data(dev, req, map_len, &cmnd);
+ ret = nvme_map_data(dev, req, &cmnd);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_cleanup_iod;
@@ -712,15 +709,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
nvme_req(req)->result = cqe.result;
blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
}
- /* If the controller ignores the cq head doorbell and continuously
- * writes to the queue, it is theoretically possible to wrap around
- * the queue twice and mistakenly return IRQ_NONE. Linux only
- * requires that 0.1% of your interrupts are handled, so this isn't
- * a big problem.
- */
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return;
@@ -1909,10 +1899,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!dev->bar)
goto release;
- return 0;
+ return 0;
release:
- pci_release_mem_regions(pdev);
- return -ENODEV;
+ pci_release_mem_regions(pdev);
+ return -ENODEV;
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f587af345889..557f29b1f1bb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
}
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
- struct request *rq, unsigned int map_len,
- struct nvme_command *c)
+ struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
}
if (count == 1) {
- if (rq_data_dir(rq) == WRITE &&
- map_len <= nvme_rdma_inline_data_size(queue) &&
- nvme_rdma_queue_idx(queue))
+ if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+ blk_rq_payload_bytes(rq) <=
+ nvme_rdma_inline_data_size(queue))
return nvme_rdma_map_sg_inline(queue, req, c);
if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
struct request *rq)
{
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
- struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
cmd->common.opcode != nvme_fabrics_command ||
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_command *c = sqe->data;
bool flush = false;
struct ib_device *dev;
- unsigned int map_len;
int ret;
WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- map_len = nvme_map_len(rq);
- ret = nvme_rdma_map_data(queue, rq, map_len, c);
+ ret = nvme_rdma_map_data(queue, rq, c);
if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", ret);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index b71e95044b43..a5c09e703bd8 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
return nvme_trans_status_code(hdr, nvme_sc);
}
-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
- u8 *cmd)
-{
- u8 immed, no_flush;
-
- immed = cmd[1] & 0x01;
- no_flush = cmd[4] & 0x04;
-
- if (immed != 0) {
- return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
- ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
- SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
- } else {
- if (no_flush == 0) {
- /* Issue NVME FLUSH command prior to START STOP UNIT */
- int res = nvme_trans_synchronize_cache(ns, hdr);
- if (res)
- return res;
- }
-
- return 0;
- }
-}
-
static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
case SECURITY_PROTOCOL_OUT:
retcode = nvme_trans_security_protocol(ns, hdr, cmd);
break;
- case START_STOP:
- retcode = nvme_trans_start_stop(ns, hdr, cmd);
- break;
case SYNCHRONIZE_CACHE:
retcode = nvme_trans_synchronize_cache(ns, hdr);
break;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ec1ad2aa0a4c..95ae52390478 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
- u64 val;
u32 val32;
u16 status = 0;
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
case NVME_FEAT_KATO:
- val = le64_to_cpu(req->cmd->prop_set.value);
- val32 = val & 0xffff;
+ val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
nvmet_set_result(req, req->sq->ctrl->kato);
break;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f5074153dcd..be8c800078e2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
{
struct nvmet_subsys *subsys = to_subsys(item);
+ nvmet_subsys_del_ctrls(subsys);
nvmet_subsys_put(subsys);
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed655c9..fc5ba2f9e15f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
ctrl->cntlid, ctrl->kato);
- ctrl->ops->delete_ctrl(ctrl);
+ nvmet_ctrl_fatal_error(ctrl);
}
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ flush_work(&ctrl->async_event_work);
+ cancel_work_sync(&ctrl->fatal_err_work);
+
ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
nvmet_subsys_put(subsys);
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
kfree(subsys);
}
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ ctrl->ops->delete_ctrl(ctrl);
+ mutex_unlock(&subsys->lock);
+}
+
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
kref_put(&subsys->ref, nvmet_subsys_free);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842f19c9..ba57f9852bde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
struct fcnvme_ls_disconnect_acc *acc =
(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_tgt_queue *queue = NULL;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
- if (!assoc)
+ if (assoc) {
+ if (rqst->discon_cmd.scope ==
+ FCNVME_DISCONN_CONNECTION) {
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(
+ rqst->discon_cmd.id));
+ if (!queue) {
+ nvmet_fc_tgt_a_put(assoc);
+ ret = VERR_NO_CONN;
+ }
+ }
+ } else
ret = VERR_NO_ASSOC;
}
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
FCNVME_LS_DISCONNECT);
- if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
- queue = nvmet_fc_find_target_queue(tgtport,
- be64_to_cpu(rqst->discon_cmd.id));
- if (queue) {
- int qid = queue->qid;
+ /* are we to delete a Connection ID (queue) */
+ if (queue) {
+ int qid = queue->qid;
- nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_delete_target_queue(queue);
- /* release the get taken by find_target_queue */
- nvmet_fc_tgt_q_put(queue);
+ /* release the get taken by find_target_queue */
+ nvmet_fc_tgt_q_put(queue);
- /* tear association down if io queue terminated */
- if (!qid)
- del_assoc = true;
- }
+ /* tear association down if io queue terminated */
+ if (!qid)
+ del_assoc = true;
}
/* release get taken in nvmet_fc_find_target_assoc */
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index bcb8ebeb01c5..4e8e6a22bce1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->lport = nport->lport;
nport->rport = rport;
- return ret ? ret : count;
+ return count;
}
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->lport = nport->lport;
nport->tport = tport;
- return ret ? ret : count;
+ return count;
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1c944f..cc7ad06b43a7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a78ac0..60990220bd83 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
{
struct ib_recv_wr *bad_wr;
+ ib_dma_sync_single_for_device(ndev->device,
+ cmd->sge[0].addr, cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+
if (ndev->srq)
return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
first_wr = &rsp->send_wr;
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+ ib_dma_sync_single_for_device(rsp->queue->dev->device,
+ rsp->send_sge.addr, rsp->send_sge.length,
+ DMA_TO_DEVICE);
+
if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->n_rdma = 0;
cmd->req.port = queue->port;
+
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->send_sge.addr, cmd->send_sge.length,
+ DMA_TO_DEVICE);
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d9b36a..398ea7f54826 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -981,8 +981,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
* @cell: nvmem cell to be read.
* @len: pointer to length of cell which will be populated on successful read.
*
- * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
- * The buffer should be freed by the consumer with a kfree().
+ * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
+ * buffer should be freed by the consumer with a kfree().
*/
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index ac27b9bac3b9..8e7b120696fa 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -71,7 +71,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
+ { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ },
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index b5305f08b184..2bdb6c389328 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,11 +21,11 @@ static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
+ u8 *val = _val;
+ int i = 0, words = bytes;
while (words--)
- *val++ = readl(base + reg + (i++ * 4));
+ *val++ = readb(base + reg + i++);
return 0;
}
@@ -34,11 +34,11 @@ static int qfprom_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
+ u8 *val = _val;
+ int i = 0, words = bytes;
while (words--)
- writel(*val++, base + reg + (i++ * 4));
+ writeb(*val++, base + reg + i++);
return 0;
}
@@ -53,7 +53,7 @@ static int qfprom_remove(struct platform_device *pdev)
static struct nvmem_config econfig = {
.name = "qfprom",
.owner = THIS_MODULE,
- .stride = 4,
+ .stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
.reg_write = qfprom_reg_write,
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index dd6d4ccb41e4..3858b87fd0bb 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
- printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+ pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{
int f = 0;
printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
// printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f38d0836751..f1b633bce525 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
xgene_msi_hwirq_alloc, NULL);
- if (rc)
+ if (rc < 0)
goto err_cpuhp;
pci_xgene_online = rc;
rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index bed19994c1e9..af8f6e92e885 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
- /* get iATU unroll support */
- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
- dev_dbg(pp->dev, "iATU unroll: %s\n",
- pp->iatu_unroll_enabled ? "enabled" : "disabled");
-
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
+ /* get iATU unroll support */
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+ dev_dbg(pp->dev, "iATU unroll: %s\n",
+ pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..204960e70333 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
+
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
/*
- * A Root Port is always the upstream end of a Link. No PCIe
- * component has two Links. Two Links are connected by a Switch
- * that has a Port on each Link and internal logic to connect the
- * two Ports.
+ * A Root Port or a PCI-to-PCIe bridge is always the upstream end
+ * of a Link. No PCIe component has two Links. Two Links are
+ * connected by a Switch that has a Port on each Link and internal
+ * logic to connect the two Ports.
*/
type = pci_pcie_type(pdev);
- if (type == PCI_EXP_TYPE_ROOT_PORT)
+ if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE)
pdev->has_secondary_link = 1;
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 54044a8ecbd7..8f8c2af45781 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -8,9 +8,16 @@ config PINCTRL
menu "Pin controllers"
depends on PINCTRL
+config GENERIC_PINCTRL_GROUPS
+ bool
+
config PINMUX
bool "Support pin multiplexing controllers" if COMPILE_TEST
+config GENERIC_PINMUX_FUNCTIONS
+ bool
+ select PINMUX
+
config PINCONF
bool "Support pin configuration controllers" if COMPILE_TEST
@@ -159,8 +166,8 @@ config PINCTRL_ROCKCHIP
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
- select PINMUX
- select PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
help
This selects the device tree based generic pinctrl driver.
@@ -293,6 +300,7 @@ source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/stm32/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
source "drivers/pinctrl/tegra/Kconfig"
+source "drivers/pinctrl/ti/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 25d50a86981d..a251f439626f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
+obj-y += ti/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-$(CONFIG_PINCTRL_MTK) += mediatek/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index a21b071ff290..7de596e2b9d4 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -43,9 +43,18 @@
* Not all pins have their signals defined (yet).
*/
+#define D6 0
+SSSF_PIN_DECL(D6, GPIOA0, MAC1LINK, SIG_DESC_SET(SCU80, 0));
+
+#define B5 1
+SSSF_PIN_DECL(B5, GPIOA1, MAC2LINK, SIG_DESC_SET(SCU80, 1));
+
#define A4 2
SSSF_PIN_DECL(A4, GPIOA2, TIMER3, SIG_DESC_SET(SCU80, 2));
+#define E6 3
+SSSF_PIN_DECL(E6, GPIOA3, TIMER4, SIG_DESC_SET(SCU80, 3));
+
#define I2C9_DESC SIG_DESC_SET(SCU90, 22)
#define C5 4
@@ -80,6 +89,26 @@ MS_PIN_DECL(D5, GPIOA7, MDIO2, TIMER8);
FUNC_GROUP_DECL(TIMER8, D5);
FUNC_GROUP_DECL(MDIO2, A3, D5);
+#define J21 8
+SSSF_PIN_DECL(J21, GPIOB0, SALT1, SIG_DESC_SET(SCU80, 8));
+
+#define J20 9
+SSSF_PIN_DECL(J20, GPIOB1, SALT2, SIG_DESC_SET(SCU80, 9));
+
+#define H18 10
+SSSF_PIN_DECL(H18, GPIOB2, SALT3, SIG_DESC_SET(SCU80, 10));
+
+#define F18 11
+SSSF_PIN_DECL(F18, GPIOB3, SALT4, SIG_DESC_SET(SCU80, 11));
+
+#define E19 12
+SIG_EXPR_DECL(LPCRST, LPCRST, SIG_DESC_SET(SCU80, 12));
+SIG_EXPR_DECL(LPCRST, LPCRSTS, SIG_DESC_SET(HW_STRAP1, 14));
+SIG_EXPR_LIST_DECL_DUAL(LPCRST, LPCRST, LPCRSTS);
+SS_PIN_DECL(E19, GPIOB4, LPCRST);
+
+FUNC_GROUP_DECL(LPCRST, E19);
+
#define H19 13
#define H19_DESC SIG_DESC_SET(SCU80, 13)
SIG_EXPR_LIST_DECL_SINGLE(LPCPD, LPCPD, H19_DESC);
@@ -92,6 +121,19 @@ FUNC_GROUP_DECL(LPCSMI, H19);
#define H20 14
SSSF_PIN_DECL(H20, GPIOB6, LPCPME, SIG_DESC_SET(SCU80, 14));
+#define E18 15
+SIG_EXPR_LIST_DECL_SINGLE(EXTRST, EXTRST,
+ SIG_DESC_SET(SCU80, 15),
+ SIG_DESC_BIT(SCU90, 31, 0),
+ SIG_DESC_SET(SCU3C, 3));
+SIG_EXPR_LIST_DECL_SINGLE(SPICS1, SPICS1,
+ SIG_DESC_SET(SCU80, 15),
+ SIG_DESC_SET(SCU90, 31));
+MS_PIN_DECL(E18, GPIOB7, EXTRST, SPICS1);
+
+FUNC_GROUP_DECL(EXTRST, E18);
+FUNC_GROUP_DECL(SPICS1, E18);
+
#define SD1_DESC SIG_DESC_SET(SCU90, 0)
#define I2C10_DESC SIG_DESC_SET(SCU90, 23)
@@ -170,6 +212,62 @@ MS_PIN_DECL(D16, GPIOD1, SD2CMD, GPID0OUT);
FUNC_GROUP_DECL(GPID0, A18, D16);
+#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
+
+#define B17 26
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
+SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
+MS_PIN_DECL(B17, GPIOD2, SD2DAT0, GPID2IN);
+
+#define A17 27
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
+SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
+MS_PIN_DECL(A17, GPIOD3, SD2DAT1, GPID2OUT);
+
+FUNC_GROUP_DECL(GPID2, B17, A17);
+
+#define GPID4_DESC SIG_DESC_SET(SCU8C, 10)
+
+#define C16 28
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT2, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4IN, GPID4, GPID);
+MS_PIN_DECL(C16, GPIOD4, SD2DAT2, GPID4IN);
+
+#define B16 29
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT3, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4OUT, GPID4, GPID);
+MS_PIN_DECL(B16, GPIOD5, SD2DAT3, GPID4OUT);
+
+FUNC_GROUP_DECL(GPID4, C16, B16);
+
+#define GPID6_DESC SIG_DESC_SET(SCU8C, 11)
+
+#define A16 30
+SIG_EXPR_LIST_DECL_SINGLE(SD2CD, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6IN, GPID6, GPID);
+MS_PIN_DECL(A16, GPIOD6, SD2CD, GPID6IN);
+
+#define E15 31
+SIG_EXPR_LIST_DECL_SINGLE(SD2WP, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6OUT, GPID6, GPID);
+MS_PIN_DECL(E15, GPIOD7, SD2WP, GPID6OUT);
+
+FUNC_GROUP_DECL(GPID6, A16, E15);
+FUNC_GROUP_DECL(SD2, A18, D16, B17, A17, C16, B16, A16, E15);
+FUNC_GROUP_DECL(GPID, A18, D16, B17, A17, C16, B16, A16, E15);
+
#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 22)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
#define GPIE2_DESC SIG_DESC_SET(SCU8C, 13)
@@ -266,6 +364,15 @@ MS_PIN_DECL(B19, GPIOF1, NDCD4, SIOPBI);
FUNC_GROUP_DECL(NDCD4, B19);
FUNC_GROUP_DECL(SIOPBI, B19);
+#define A20 42
+SIG_EXPR_LIST_DECL_SINGLE(NDSR4, NDSR4, SIG_DESC_SET(SCU80, 26));
+SIG_EXPR_DECL(SIOPWRGD, SIOPWRGD, SIG_DESC_SET(SCUA4, 12));
+SIG_EXPR_DECL(SIOPWRGD, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWRGD, SIOPWRGD, ACPI);
+MS_PIN_DECL(A20, GPIOF2, NDSR4, SIOPWRGD);
+FUNC_GROUP_DECL(NDSR4, A20);
+FUNC_GROUP_DECL(SIOPWRGD, A20);
+
#define D17 43
SIG_EXPR_LIST_DECL_SINGLE(NRI4, NRI4, SIG_DESC_SET(SCU80, 27));
SIG_EXPR_DECL(SIOPBO, SIOPBO, SIG_DESC_SET(SCUA4, 14));
@@ -275,7 +382,17 @@ MS_PIN_DECL(D17, GPIOF3, NRI4, SIOPBO);
FUNC_GROUP_DECL(NRI4, D17);
FUNC_GROUP_DECL(SIOPBO, D17);
-FUNC_GROUP_DECL(ACPI, B19, D17);
+#define B18 44
+SSSF_PIN_DECL(B18, GPIOF4, NDTR4, SIG_DESC_SET(SCU80, 28));
+
+#define A19 45
+SIG_EXPR_LIST_DECL_SINGLE(NDTS4, NDTS4, SIG_DESC_SET(SCU80, 29));
+SIG_EXPR_DECL(SIOSCI, SIOSCI, SIG_DESC_SET(SCUA4, 15));
+SIG_EXPR_DECL(SIOSCI, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOSCI, SIOSCI, ACPI);
+MS_PIN_DECL(A19, GPIOF5, NDTS4, SIOSCI);
+FUNC_GROUP_DECL(NDTS4, A19);
+FUNC_GROUP_DECL(SIOSCI, A19);
#define E16 46
SSSF_PIN_DECL(E16, GPIOF6, TXD4, SIG_DESC_SET(SCU80, 30));
@@ -283,6 +400,34 @@ SSSF_PIN_DECL(E16, GPIOF6, TXD4, SIG_DESC_SET(SCU80, 30));
#define C17 47
SSSF_PIN_DECL(C17, GPIOF7, RXD4, SIG_DESC_SET(SCU80, 31));
+#define A14 48
+SSSF_PIN_DECL(A14, GPIOG0, SGPSCK, SIG_DESC_SET(SCU84, 0));
+
+#define E13 49
+SSSF_PIN_DECL(E13, GPIOG1, SGPSLD, SIG_DESC_SET(SCU84, 1));
+
+#define D13 50
+SSSF_PIN_DECL(D13, GPIOG2, SGPSI0, SIG_DESC_SET(SCU84, 2));
+
+#define C13 51
+SSSF_PIN_DECL(C13, GPIOG3, SGPSI1, SIG_DESC_SET(SCU84, 3));
+
+#define B13 52
+SIG_EXPR_LIST_DECL_SINGLE(OSCCLK, OSCCLK, SIG_DESC_SET(SCU2C, 1));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST1, WDTRST1, SIG_DESC_SET(SCU84, 4));
+MS_PIN_DECL(B13, GPIOG4, OSCCLK, WDTRST1);
+
+FUNC_GROUP_DECL(OSCCLK, B13);
+FUNC_GROUP_DECL(WDTRST1, B13);
+
+#define Y21 53
+SIG_EXPR_LIST_DECL_SINGLE(USBCKI, USBCKI, SIG_DESC_SET(HW_STRAP1, 23));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST2, WDTRST2, SIG_DESC_SET(SCU84, 5));
+MS_PIN_DECL(Y21, GPIOG5, USBCKI, WDTRST2);
+
+FUNC_GROUP_DECL(USBCKI, Y21);
+FUNC_GROUP_DECL(WDTRST2, Y21);
+
#define AA22 54
SSSF_PIN_DECL(AA22, GPIOG6, FLBUSY, SIG_DESC_SET(SCU84, 6));
@@ -292,7 +437,7 @@ SSSF_PIN_DECL(U18, GPIOG7, FLWP, SIG_DESC_SET(SCU84, 7));
#define UART6_DESC SIG_DESC_SET(SCU90, 7)
#define ROM16_DESC SIG_DESC_SET(SCU90, 6)
#define FLASH_WIDE SIG_DESC_SET(HW_STRAP1, 4)
-#define BOOT_SRC_NOR { HW_STRAP1, GENMASK(1, 0), 0, 0 }
+#define BOOT_SRC_NOR { ASPEED_IP_SCU, HW_STRAP1, GENMASK(1, 0), 0, 0 }
#define A8 56
SIG_EXPR_DECL(ROMD8, ROM16, ROM16_DESC);
@@ -352,6 +497,93 @@ MS_PIN_DECL(E7, GPIOH7, ROMD15, RXD6);
FUNC_GROUP_DECL(UART6, A8, C7, B7, A7, D7, B6, A6, E7);
+#define SPI1_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
+#define C22 64
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(C22, GPIOI0, SYSCS);
+
+#define G18 65
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(G18, GPIOI1, SYSCK);
+
+#define D19 66
+SIG_EXPR_DECL(SYSDO, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSDO, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSDO, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(D19, GPIOI2, SYSDO);
+
+#define C20 67
+SIG_EXPR_DECL(SYSDI, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSDI, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSDI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(C20, GPIOI3, SYSDI);
+
+#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B22 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+ SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(B22, GPIOI4, SPI1CS0, VBCS);
+
+#define G19 69
+SIG_EXPR_DECL(SPI1CK, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+ SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(G19, GPIOI5, SPI1CK, VBCK);
+
+#define C18 70
+SIG_EXPR_DECL(SPI1DO, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1DO, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1DO, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1DO, SIG_EXPR_PTR(SPI1DO, SPI1),
+ SIG_EXPR_PTR(SPI1DO, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1DO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBDO, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(C18, GPIOI6, SPI1DO, VBDO);
+
+#define E20 71
+SIG_EXPR_DECL(SPI1DI, SPI1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1DI, SPI1DEBUG, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1DI, SPI1PASSTHRU, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1DI, SIG_EXPR_PTR(SPI1DI, SPI1),
+ SIG_EXPR_PTR(SPI1DI, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1DI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBDI, VGABIOS_ROM, VB_DESC);
+MS_PIN_DECL(E20, GPIOI7, SPI1DI, VBDI);
+
+FUNC_GROUP_DECL(SPI1, B22, G19, C18, E20);
+FUNC_GROUP_DECL(SPI1DEBUG, C22, G18, D19, C20, B22, G19, C18, E20);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C22, G18, D19, C20, B22, G19, C18, E20);
+FUNC_GROUP_DECL(VGABIOS_ROM, B22, G19, C18, E20);
+
+#define J5 72
+SSSF_PIN_DECL(J5, GPIOJ0, SGPMCK, SIG_DESC_SET(SCU84, 8));
+
+#define J4 73
+SSSF_PIN_DECL(J4, GPIOJ1, SGPMLD, SIG_DESC_SET(SCU84, 9));
+
+#define K5 74
+SSSF_PIN_DECL(K5, GPIOJ2, SGPMO, SIG_DESC_SET(SCU84, 10));
+
#define J3 75
SSSF_PIN_DECL(J3, GPIOJ3, SGPMI, SIG_DESC_SET(SCU84, 11));
@@ -418,9 +650,9 @@ FUNC_GROUP_DECL(I2C8, G5, F3);
#define U1 88
SSSF_PIN_DECL(U1, GPIOL0, NCTS1, SIG_DESC_SET(SCU84, 16));
-#define VPI18_DESC { SCU90, GENMASK(5, 4), 1, 0 }
-#define VPI24_DESC { SCU90, GENMASK(5, 4), 2, 0 }
-#define VPI30_DESC { SCU90, GENMASK(5, 4), 3, 0 }
+#define VPI18_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 1, 0 }
+#define VPI24_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 2, 0 }
+#define VPI30_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 3, 0 }
#define T5 89
#define T5_DESC SIG_DESC_SET(SCU84, 17)
@@ -496,6 +728,102 @@ SIG_EXPR_LIST_DECL_SINGLE(RXD1, RXD1, U5_DESC);
MS_PIN_DECL(U5, GPIOL7, VPIB1, RXD1);
FUNC_GROUP_DECL(RXD1, U5);
+#define V3 96
+#define V3_DESC SIG_DESC_SET(SCU84, 24)
+SIG_EXPR_DECL(VPIOB2, VPI18, VPI18_DESC, V3_DESC);
+SIG_EXPR_DECL(VPIOB2, VPI24, VPI24_DESC, V3_DESC);
+SIG_EXPR_DECL(VPIOB2, VPI30, VPI30_DESC, V3_DESC);
+SIG_EXPR_LIST_DECL(VPIOB2, SIG_EXPR_PTR(VPIOB2, VPI18),
+ SIG_EXPR_PTR(VPIOB2, VPI24),
+ SIG_EXPR_PTR(VPIOB2, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NCTS2, NCTS2, V3_DESC);
+MS_PIN_DECL(V3, GPIOM0, VPIOB2, NCTS2);
+FUNC_GROUP_DECL(NCTS2, V3);
+
+#define W2 97
+#define W2_DESC SIG_DESC_SET(SCU84, 25)
+SIG_EXPR_DECL(VPIOB3, VPI18, VPI18_DESC, W2_DESC);
+SIG_EXPR_DECL(VPIOB3, VPI24, VPI24_DESC, W2_DESC);
+SIG_EXPR_DECL(VPIOB3, VPI30, VPI30_DESC, W2_DESC);
+SIG_EXPR_LIST_DECL(VPIOB3, SIG_EXPR_PTR(VPIOB3, VPI18),
+ SIG_EXPR_PTR(VPIOB3, VPI24),
+ SIG_EXPR_PTR(VPIOB3, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDCD2, NDCD2, W2_DESC);
+MS_PIN_DECL(W2, GPIOM1, VPIOB3, NDCD2);
+FUNC_GROUP_DECL(NDCD2, W2);
+
+#define Y1 98
+#define Y1_DESC SIG_DESC_SET(SCU84, 26)
+SIG_EXPR_DECL(VPIOB4, VPI18, VPI18_DESC, Y1_DESC);
+SIG_EXPR_DECL(VPIOB4, VPI24, VPI24_DESC, Y1_DESC);
+SIG_EXPR_DECL(VPIOB4, VPI30, VPI30_DESC, Y1_DESC);
+SIG_EXPR_LIST_DECL(VPIOB4, SIG_EXPR_PTR(VPIOB4, VPI18),
+ SIG_EXPR_PTR(VPIOB4, VPI24),
+ SIG_EXPR_PTR(VPIOB4, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDSR2, NDSR2, Y1_DESC);
+MS_PIN_DECL(Y1, GPIOM2, VPIOB4, NDSR2);
+FUNC_GROUP_DECL(NDSR2, Y1);
+
+#define V4 99
+#define V4_DESC SIG_DESC_SET(SCU84, 27)
+SIG_EXPR_DECL(VPIOB5, VPI18, VPI18_DESC, V4_DESC);
+SIG_EXPR_DECL(VPIOB5, VPI24, VPI24_DESC, V4_DESC);
+SIG_EXPR_DECL(VPIOB5, VPI30, VPI30_DESC, V4_DESC);
+SIG_EXPR_LIST_DECL(VPIOB5, SIG_EXPR_PTR(VPIOB5, VPI18),
+ SIG_EXPR_PTR(VPIOB5, VPI24),
+ SIG_EXPR_PTR(VPIOB5, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NRI2, NRI2, V4_DESC);
+MS_PIN_DECL(V4, GPIOM3, VPIOB5, NRI2);
+FUNC_GROUP_DECL(NRI2, V4);
+
+#define W3 100
+#define W3_DESC SIG_DESC_SET(SCU84, 28)
+SIG_EXPR_DECL(VPIOB6, VPI18, VPI18_DESC, W3_DESC);
+SIG_EXPR_DECL(VPIOB6, VPI24, VPI24_DESC, W3_DESC);
+SIG_EXPR_DECL(VPIOB6, VPI30, VPI30_DESC, W3_DESC);
+SIG_EXPR_LIST_DECL(VPIOB6, SIG_EXPR_PTR(VPIOB6, VPI18),
+ SIG_EXPR_PTR(VPIOB6, VPI24),
+ SIG_EXPR_PTR(VPIOB6, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NDTR2, NDTR2, W3_DESC);
+MS_PIN_DECL(W3, GPIOM4, VPIOB6, NDTR2);
+FUNC_GROUP_DECL(NDTR2, W3);
+
+#define Y2 101
+#define Y2_DESC SIG_DESC_SET(SCU84, 29)
+SIG_EXPR_DECL(VPIOB7, VPI18, VPI18_DESC, Y2_DESC);
+SIG_EXPR_DECL(VPIOB7, VPI24, VPI24_DESC, Y2_DESC);
+SIG_EXPR_DECL(VPIOB7, VPI30, VPI30_DESC, Y2_DESC);
+SIG_EXPR_LIST_DECL(VPIOB7, SIG_EXPR_PTR(VPIOB7, VPI18),
+ SIG_EXPR_PTR(VPIOB7, VPI24),
+ SIG_EXPR_PTR(VPIOB7, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(NRTS2, NRTS2, Y2_DESC);
+MS_PIN_DECL(Y2, GPIOM5, VPIOB7, NRTS2);
+FUNC_GROUP_DECL(NRTS2, Y2);
+
+#define AA1 102
+#define AA1_DESC SIG_DESC_SET(SCU84, 30)
+SIG_EXPR_DECL(VPIOB8, VPI18, VPI18_DESC, AA1_DESC);
+SIG_EXPR_DECL(VPIOB8, VPI24, VPI24_DESC, AA1_DESC);
+SIG_EXPR_DECL(VPIOB8, VPI30, VPI30_DESC, AA1_DESC);
+SIG_EXPR_LIST_DECL(VPIOB8, SIG_EXPR_PTR(VPIOB8, VPI18),
+ SIG_EXPR_PTR(VPIOB8, VPI24),
+ SIG_EXPR_PTR(VPIOB8, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(TXD2, TXD2, AA1_DESC);
+MS_PIN_DECL(AA1, GPIOM6, VPIOB8, TXD2);
+FUNC_GROUP_DECL(TXD2, AA1);
+
+#define V5 103
+#define V5_DESC SIG_DESC_SET(SCU84, 31)
+SIG_EXPR_DECL(VPIOB9, VPI18, VPI18_DESC, V5_DESC);
+SIG_EXPR_DECL(VPIOB9, VPI24, VPI24_DESC, V5_DESC);
+SIG_EXPR_DECL(VPIOB9, VPI30, VPI30_DESC, V5_DESC);
+SIG_EXPR_LIST_DECL(VPIOB9, SIG_EXPR_PTR(VPIOB9, VPI18),
+ SIG_EXPR_PTR(VPIOB9, VPI24),
+ SIG_EXPR_PTR(VPIOB9, VPI30));
+SIG_EXPR_LIST_DECL_SINGLE(RXD2, RXD2, V5_DESC);
+MS_PIN_DECL(V5, GPIOM7, VPIOB9, RXD2);
+FUNC_GROUP_DECL(RXD2, V5);
+
#define W4 104
#define W4_DESC SIG_DESC_SET(SCU88, 0)
SIG_EXPR_LIST_DECL_SINGLE(VPIG0, VPI30, VPI30_DESC, W4_DESC);
@@ -580,10 +908,57 @@ SS_PIN_DECL(V6, GPIOO0, VPIG8);
SIG_EXPR_LIST_DECL_SINGLE(VPIG9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 9));
SS_PIN_DECL(Y5, GPIOO1, VPIG9);
-FUNC_GROUP_DECL(VPI18, T5, U3, V1, U4, V2, AA22, W5, Y4, AA3, AB2);
-FUNC_GROUP_DECL(VPI24, T5, U3, V1, U4, V2, AA22, W5, Y4, AA3, AB2, V6, Y5);
-FUNC_GROUP_DECL(VPI30, T5, U3, V1, U4, V2, W1, U5, W4, Y3, AA22, W5, Y4, AA3,
- AB2);
+#define AA4 114
+SIG_EXPR_LIST_DECL_SINGLE(VPIR0, VPI30, VPI30_DESC, SIG_DESC_SET(SCU88, 10));
+SS_PIN_DECL(AA4, GPIOO2, VPIR0);
+
+#define AB3 115
+SIG_EXPR_LIST_DECL_SINGLE(VPIR1, VPI30, VPI30_DESC, SIG_DESC_SET(SCU88, 11));
+SS_PIN_DECL(AB3, GPIOO3, VPIR1);
+
+#define W6 116
+SIG_EXPR_LIST_DECL_SINGLE(VPIR2, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 12));
+SS_PIN_DECL(W6, GPIOO4, VPIR2);
+
+#define AA5 117
+SIG_EXPR_LIST_DECL_SINGLE(VPIR3, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 13));
+SS_PIN_DECL(AA5, GPIOO5, VPIR3);
+
+#define AB4 118
+SIG_EXPR_LIST_DECL_SINGLE(VPIR4, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 14));
+SS_PIN_DECL(AB4, GPIOO6, VPIR4);
+
+#define V7 119
+SIG_EXPR_LIST_DECL_SINGLE(VPIR5, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 15));
+SS_PIN_DECL(V7, GPIOO7, VPIR5);
+
+#define Y6 120
+SIG_EXPR_LIST_DECL_SINGLE(VPIR6, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 16));
+SS_PIN_DECL(Y6, GPIOP0, VPIR6);
+
+#define AB5 121
+SIG_EXPR_LIST_DECL_SINGLE(VPIR7, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 17));
+SS_PIN_DECL(AB5, GPIOP1, VPIR7);
+
+#define W7 122
+SIG_EXPR_LIST_DECL_SINGLE(VPIR8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 18));
+SS_PIN_DECL(W7, GPIOP2, VPIR8);
+
+#define AA6 123
+SIG_EXPR_LIST_DECL_SINGLE(VPIR9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 19));
+SS_PIN_DECL(AA6, GPIOP3, VPIR9);
+
+FUNC_GROUP_DECL(VPI18, T5, U3, V1, U4, V2, V3, W2, Y1, V4, W3, Y2, AA1, V5,
+ AA22, W5, Y4, AA3, AB2);
+FUNC_GROUP_DECL(VPI24, T5, U3, V1, U4, V2, V3, W2, Y1, V4, W3, Y2, AA1, V5,
+ AA22, W5, Y4, AA3, AB2, V6, Y5, W6, AA5, AB4, V7, Y6, AB5, W7,
+ AA6);
+FUNC_GROUP_DECL(VPI30, T5, U3, V1, U4, V2, W1, U5, V3, W2, Y1, V4, W3, Y2, AA1,
+ V5, W4, Y3, AA22, W5, Y4, AA3, AB2, AA4, AB3);
+
+#define AB6 124
+SIG_EXPR_LIST_DECL_SINGLE(GPIOP4, GPIOP4);
+MS_PIN_DECL_(AB6, SIG_EXPR_LIST_PTR(GPIOP4));
#define Y7 125
SIG_EXPR_LIST_DECL_SINGLE(GPIOP5, GPIOP5);
@@ -619,6 +994,18 @@ SS_PIN_DECL(F5, GPIOQ3, SDA4);
FUNC_GROUP_DECL(I2C4, B1, F5);
+#define I2C14_DESC SIG_DESC_SET(SCU90, 27)
+
+#define H4 132
+SIG_EXPR_LIST_DECL_SINGLE(SCL14, I2C14, I2C14_DESC);
+SS_PIN_DECL(H4, GPIOQ4, SCL14);
+
+#define H3 133
+SIG_EXPR_LIST_DECL_SINGLE(SDA14, I2C14, I2C14_DESC);
+SS_PIN_DECL(H3, GPIOQ5, SDA14);
+
+FUNC_GROUP_DECL(I2C14, H4, H3);
+
#define DASH9028_DESC SIG_DESC_SET(SCU90, 28)
#define H2 134
@@ -641,11 +1028,11 @@ SSSF_PIN_DECL(Y22, GPIOR2, ROMCS3, SIG_DESC_SET(SCU88, 26));
#define U19 139
SSSF_PIN_DECL(U19, GPIOR3, ROMCS4, SIG_DESC_SET(SCU88, 27));
-#define VPOOFF0_DESC { SCU94, GENMASK(1, 0), 0, 0 }
-#define VPO12_DESC { SCU94, GENMASK(1, 0), 1, 0 }
-#define VPO24_DESC { SCU94, GENMASK(1, 0), 2, 0 }
-#define VPOOFF1_DESC { SCU94, GENMASK(1, 0), 3, 0 }
-#define VPO_OFF_12 { SCU94, 0x2, 0, 0 }
+#define VPOOFF0_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
+#define VPO12_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 1, 0 }
+#define VPO24_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 2, 0 }
+#define VPOOFF1_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 3, 0 }
+#define VPO_OFF_12 { ASPEED_IP_SCU, SCU94, 0x2, 0, 0 }
#define VPO_24_OFF SIG_DESC_SET(SCU94, 1)
#define V21 140
@@ -776,13 +1163,6 @@ SIG_EXPR_LIST_DECL(ROMA23, SIG_EXPR_PTR(ROMA23, ROM8),
SIG_EXPR_LIST_DECL_SINGLE(VPOR5, VPO24, K18_DESC, VPO_24_OFF);
MS_PIN_DECL(K18, GPIOS7, ROMA23, VPOR5);
-FUNC_GROUP_DECL(ROM8, V20, U21, T19, V22, U20, R18, N21, L22, K18, W21, Y22,
- U19);
-FUNC_GROUP_DECL(ROM16, V20, U21, T19, V22, U20, R18, N21, L22, K18,
- A8, C7, B7, A7, D7, B6, A6, E7, W21, Y22, U19);
-FUNC_GROUP_DECL(VPO12, U21, T19, V22, U20);
-FUNC_GROUP_DECL(VPO24, U21, T19, V22, U20, L22, K18, V21, W22);
-
#define RMII1_DESC SIG_DESC_BIT(HW_STRAP1, 6, 0)
#define A12 152
@@ -827,6 +1207,50 @@ SIG_EXPR_LIST_DECL_SINGLE(RGMII1TXD3, RGMII1);
MS_PIN_DECL_(A13, SIG_EXPR_LIST_PTR(GPIOT5), SIG_EXPR_LIST_PTR(DASHA13),
SIG_EXPR_LIST_PTR(RGMII1TXD3));
+#define RMII2_DESC SIG_DESC_BIT(HW_STRAP1, 7, 0)
+
+#define D9 158
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT6, GPIOT6, SIG_DESC_SET(SCUA0, 6));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXEN, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXCK, RGMII2);
+MS_PIN_DECL_(D9, SIG_EXPR_LIST_PTR(GPIOT6), SIG_EXPR_LIST_PTR(RMII2TXEN),
+ SIG_EXPR_LIST_PTR(RGMII2TXCK));
+
+#define E9 159
+SIG_EXPR_LIST_DECL_SINGLE(GPIOT7, GPIOT7, SIG_DESC_SET(SCUA0, 7));
+SIG_EXPR_LIST_DECL_SINGLE(DASHE9, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXCTL, RGMII2);
+MS_PIN_DECL_(E9, SIG_EXPR_LIST_PTR(GPIOT7), SIG_EXPR_LIST_PTR(DASHE9),
+ SIG_EXPR_LIST_PTR(RGMII2TXCTL));
+
+#define A10 160
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU0, GPIOU0, SIG_DESC_SET(SCUA0, 8));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXD0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD0, RGMII2);
+MS_PIN_DECL_(A10, SIG_EXPR_LIST_PTR(GPIOU0), SIG_EXPR_LIST_PTR(RMII2TXD0),
+ SIG_EXPR_LIST_PTR(RGMII2TXD0));
+
+#define B10 161
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU1, GPIOU1, SIG_DESC_SET(SCUA0, 9));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2TXD1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD1, RGMII2);
+MS_PIN_DECL_(B10, SIG_EXPR_LIST_PTR(GPIOU1), SIG_EXPR_LIST_PTR(RMII2TXD1),
+ SIG_EXPR_LIST_PTR(RGMII2TXD1));
+
+#define C10 162
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU2, GPIOU2, SIG_DESC_SET(SCUA0, 10));
+SIG_EXPR_LIST_DECL_SINGLE(DASHC10, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD2, RGMII2);
+MS_PIN_DECL_(C10, SIG_EXPR_LIST_PTR(GPIOU2), SIG_EXPR_LIST_PTR(DASHC10),
+ SIG_EXPR_LIST_PTR(RGMII2TXD2));
+
+#define D10 163
+SIG_EXPR_LIST_DECL_SINGLE(GPIOU3, GPIOU3, SIG_DESC_SET(SCUA0, 11));
+SIG_EXPR_LIST_DECL_SINGLE(DASHD10, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2TXD3, RGMII2);
+MS_PIN_DECL_(D10, SIG_EXPR_LIST_PTR(GPIOU3), SIG_EXPR_LIST_PTR(DASHD10),
+ SIG_EXPR_LIST_PTR(RGMII2TXD3));
+
#define E11 164
SIG_EXPR_LIST_DECL_SINGLE(GPIOU4, GPIOU4, SIG_DESC_SET(SCUA0, 12));
SIG_EXPR_LIST_DECL_SINGLE(RMII1RCLK, RMII1, RMII1_DESC);
@@ -869,11 +1293,419 @@ SIG_EXPR_LIST_DECL_SINGLE(RGMII1RXD3, RGMII1);
MS_PIN_DECL_(E10, SIG_EXPR_LIST_PTR(GPIOV1), SIG_EXPR_LIST_PTR(RMII1RXER),
SIG_EXPR_LIST_PTR(RGMII1RXD3));
+#define C9 170
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV2, GPIOV2, SIG_DESC_SET(SCUA0, 18));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RCLK, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXCK, RGMII2);
+MS_PIN_DECL_(C9, SIG_EXPR_LIST_PTR(GPIOV2), SIG_EXPR_LIST_PTR(RMII2RCLK),
+ SIG_EXPR_LIST_PTR(RGMII2RXCK));
+
+#define B9 171
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV3, GPIOV3, SIG_DESC_SET(SCUA0, 19));
+SIG_EXPR_LIST_DECL_SINGLE(DASHB9, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXCTL, RGMII2);
+MS_PIN_DECL_(B9, SIG_EXPR_LIST_PTR(GPIOV3), SIG_EXPR_LIST_PTR(DASHB9),
+ SIG_EXPR_LIST_PTR(RGMII2RXCTL));
+
+#define A9 172
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV4, GPIOV4, SIG_DESC_SET(SCUA0, 20));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXD0, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD0, RGMII2);
+MS_PIN_DECL_(A9, SIG_EXPR_LIST_PTR(GPIOV4), SIG_EXPR_LIST_PTR(RMII2RXD0),
+ SIG_EXPR_LIST_PTR(RGMII2RXD0));
+
+#define E8 173
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV5, GPIOV5, SIG_DESC_SET(SCUA0, 21));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXD1, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD1, RGMII2);
+MS_PIN_DECL_(E8, SIG_EXPR_LIST_PTR(GPIOV5), SIG_EXPR_LIST_PTR(RMII2RXD1),
+ SIG_EXPR_LIST_PTR(RGMII2RXD1));
+
+#define D8 174
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV6, GPIOV6, SIG_DESC_SET(SCUA0, 22));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2CRSDV, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD2, RGMII2);
+MS_PIN_DECL_(D8, SIG_EXPR_LIST_PTR(GPIOV6), SIG_EXPR_LIST_PTR(RMII2CRSDV),
+ SIG_EXPR_LIST_PTR(RGMII2RXD2));
+
+#define C8 175
+SIG_EXPR_LIST_DECL_SINGLE(GPIOV7, GPIOV7, SIG_DESC_SET(SCUA0, 23));
+SIG_EXPR_LIST_DECL_SINGLE(RMII2RXER, RMII2, RMII2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RGMII2RXD3, RGMII2);
+MS_PIN_DECL_(C8, SIG_EXPR_LIST_PTR(GPIOV7), SIG_EXPR_LIST_PTR(RMII2RXER),
+ SIG_EXPR_LIST_PTR(RGMII2RXD3));
+
FUNC_GROUP_DECL(RMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
E10);
FUNC_GROUP_DECL(RGMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
E10);
+FUNC_GROUP_DECL(RMII2, D9, E9, A10, B10, C10, D10, C9, B9, A9, E8, D8, C8);
+FUNC_GROUP_DECL(RGMII2, D9, E9, A10, B10, C10, D10, C9, B9, A9, E8, D8, C8);
+
+#define L5 176
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW0, GPIOW0, SIG_DESC_SET(SCUA0, 24));
+SIG_EXPR_LIST_DECL_SINGLE(ADC0, ADC0);
+MS_PIN_DECL_(L5, SIG_EXPR_LIST_PTR(GPIOW0), SIG_EXPR_LIST_PTR(ADC0));
+FUNC_GROUP_DECL(ADC0, L5);
+
+#define L4 177
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW1, GPIOW1, SIG_DESC_SET(SCUA0, 25));
+SIG_EXPR_LIST_DECL_SINGLE(ADC1, ADC1);
+MS_PIN_DECL_(L4, SIG_EXPR_LIST_PTR(GPIOW1), SIG_EXPR_LIST_PTR(ADC1));
+FUNC_GROUP_DECL(ADC1, L4);
+
+#define L3 178
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW2, GPIOW2, SIG_DESC_SET(SCUA0, 26));
+SIG_EXPR_LIST_DECL_SINGLE(ADC2, ADC2);
+MS_PIN_DECL_(L3, SIG_EXPR_LIST_PTR(GPIOW2), SIG_EXPR_LIST_PTR(ADC2));
+FUNC_GROUP_DECL(ADC2, L3);
+
+#define L2 179
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW3, GPIOW3, SIG_DESC_SET(SCUA0, 27));
+SIG_EXPR_LIST_DECL_SINGLE(ADC3, ADC3);
+MS_PIN_DECL_(L2, SIG_EXPR_LIST_PTR(GPIOW3), SIG_EXPR_LIST_PTR(ADC3));
+FUNC_GROUP_DECL(ADC3, L2);
+
+#define L1 180
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW4, GPIOW4, SIG_DESC_SET(SCUA0, 28));
+SIG_EXPR_LIST_DECL_SINGLE(ADC4, ADC4);
+MS_PIN_DECL_(L1, SIG_EXPR_LIST_PTR(GPIOW4), SIG_EXPR_LIST_PTR(ADC4));
+FUNC_GROUP_DECL(ADC4, L1);
+
+#define M5 181
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW5, GPIOW5, SIG_DESC_SET(SCUA0, 29));
+SIG_EXPR_LIST_DECL_SINGLE(ADC5, ADC5);
+MS_PIN_DECL_(M5, SIG_EXPR_LIST_PTR(GPIOW5), SIG_EXPR_LIST_PTR(ADC5));
+FUNC_GROUP_DECL(ADC5, M5);
+
+#define M4 182
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW6, GPIOW6, SIG_DESC_SET(SCUA0, 30));
+SIG_EXPR_LIST_DECL_SINGLE(ADC6, ADC6);
+MS_PIN_DECL_(M4, SIG_EXPR_LIST_PTR(GPIOW6), SIG_EXPR_LIST_PTR(ADC6));
+FUNC_GROUP_DECL(ADC6, M4);
+
+#define M3 183
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW7, GPIOW7, SIG_DESC_SET(SCUA0, 31));
+SIG_EXPR_LIST_DECL_SINGLE(ADC7, ADC7);
+MS_PIN_DECL_(M3, SIG_EXPR_LIST_PTR(GPIOW7), SIG_EXPR_LIST_PTR(ADC7));
+FUNC_GROUP_DECL(ADC7, M3);
+
+#define M2 184
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX0, GPIOX0, SIG_DESC_SET(SCUA4, 0));
+SIG_EXPR_LIST_DECL_SINGLE(ADC8, ADC8);
+MS_PIN_DECL_(M2, SIG_EXPR_LIST_PTR(GPIOX0), SIG_EXPR_LIST_PTR(ADC8));
+FUNC_GROUP_DECL(ADC8, M2);
+
+#define M1 185
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX1, GPIOX1, SIG_DESC_SET(SCUA4, 1));
+SIG_EXPR_LIST_DECL_SINGLE(ADC9, ADC9);
+MS_PIN_DECL_(M1, SIG_EXPR_LIST_PTR(GPIOX1), SIG_EXPR_LIST_PTR(ADC9));
+FUNC_GROUP_DECL(ADC9, M1);
+
+#define N5 186
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX2, GPIOX2, SIG_DESC_SET(SCUA4, 2));
+SIG_EXPR_LIST_DECL_SINGLE(ADC10, ADC10);
+MS_PIN_DECL_(N5, SIG_EXPR_LIST_PTR(GPIOX2), SIG_EXPR_LIST_PTR(ADC10));
+FUNC_GROUP_DECL(ADC10, N5);
+
+#define N4 187
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX3, GPIOX3, SIG_DESC_SET(SCUA4, 3));
+SIG_EXPR_LIST_DECL_SINGLE(ADC11, ADC11);
+MS_PIN_DECL_(N4, SIG_EXPR_LIST_PTR(GPIOX3), SIG_EXPR_LIST_PTR(ADC11));
+FUNC_GROUP_DECL(ADC11, N4);
+
+#define N3 188
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX4, GPIOX4, SIG_DESC_SET(SCUA4, 4));
+SIG_EXPR_LIST_DECL_SINGLE(ADC12, ADC12);
+MS_PIN_DECL_(N3, SIG_EXPR_LIST_PTR(GPIOX4), SIG_EXPR_LIST_PTR(ADC12));
+FUNC_GROUP_DECL(ADC12, N3);
+
+#define N2 189
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX5, GPIOX5, SIG_DESC_SET(SCUA4, 5));
+SIG_EXPR_LIST_DECL_SINGLE(ADC13, ADC13);
+MS_PIN_DECL_(N2, SIG_EXPR_LIST_PTR(GPIOX5), SIG_EXPR_LIST_PTR(ADC13));
+FUNC_GROUP_DECL(ADC13, N2);
+
+#define N1 190
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX6, GPIOX6, SIG_DESC_SET(SCUA4, 6));
+SIG_EXPR_LIST_DECL_SINGLE(ADC14, ADC14);
+MS_PIN_DECL_(N1, SIG_EXPR_LIST_PTR(GPIOX6), SIG_EXPR_LIST_PTR(ADC14));
+FUNC_GROUP_DECL(ADC14, N1);
+
+#define P5 191
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX7, GPIOX7, SIG_DESC_SET(SCUA4, 7));
+SIG_EXPR_LIST_DECL_SINGLE(ADC15, ADC15);
+MS_PIN_DECL_(P5, SIG_EXPR_LIST_PTR(GPIOX7), SIG_EXPR_LIST_PTR(ADC15));
+FUNC_GROUP_DECL(ADC15, P5);
+
+#define C21 192
+SIG_EXPR_DECL(SIOS3, SIOS3, SIG_DESC_SET(SCUA4, 8));
+SIG_EXPR_DECL(SIOS3, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS3, SIOS3, ACPI);
+SS_PIN_DECL(C21, GPIOY0, SIOS3);
+FUNC_GROUP_DECL(SIOS3, C21);
+
+#define F20 193
+SIG_EXPR_DECL(SIOS5, SIOS5, SIG_DESC_SET(SCUA4, 9));
+SIG_EXPR_DECL(SIOS5, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS5, SIOS5, ACPI);
+SS_PIN_DECL(F20, GPIOY1, SIOS5);
+FUNC_GROUP_DECL(SIOS5, F20);
+
+#define G20 194
+SIG_EXPR_DECL(SIOPWREQ, SIOPWREQ, SIG_DESC_SET(SCUA4, 10));
+SIG_EXPR_DECL(SIOPWREQ, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWREQ, SIOPWREQ, ACPI);
+SS_PIN_DECL(G20, GPIOY2, SIOPWREQ);
+FUNC_GROUP_DECL(SIOPWREQ, G20);
+
+#define K20 195
+SIG_EXPR_DECL(SIOONCTRL, SIOONCTRL, SIG_DESC_SET(SCUA4, 11));
+SIG_EXPR_DECL(SIOONCTRL, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOONCTRL, SIOONCTRL, ACPI);
+SS_PIN_DECL(K20, GPIOY3, SIOONCTRL);
+FUNC_GROUP_DECL(SIOONCTRL, K20);
+
+FUNC_GROUP_DECL(ACPI, B19, A20, D17, A19, C21, F20, G20, K20);
+
+#define R22 200
+#define R22_DESC SIG_DESC_SET(SCUA4, 16)
+SIG_EXPR_DECL(ROMA2, ROM8, R22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA2, ROM16, R22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA2, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB0, VPO12, R22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB0, VPO24, R22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB0, VPOOFF1, R22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB0, SIG_EXPR_PTR(VPOB0, VPO12),
+ SIG_EXPR_PTR(VPOB0, VPO24), SIG_EXPR_PTR(VPOB0, VPOOFF1));
+MS_PIN_DECL(R22, GPIOZ0, ROMA2, VPOB0);
+
+#define P18 201
+#define P18_DESC SIG_DESC_SET(SCUA4, 17)
+SIG_EXPR_DECL(ROMA3, ROM8, P18_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA3, ROM16, P18_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA3, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB1, VPO12, P18_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB1, VPO24, P18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB1, VPOOFF1, P18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB1, SIG_EXPR_PTR(VPOB1, VPO12),
+ SIG_EXPR_PTR(VPOB1, VPO24), SIG_EXPR_PTR(VPOB1, VPOOFF1));
+MS_PIN_DECL(P18, GPIOZ1, ROMA3, VPOB1);
+
+#define P19 202
+#define P19_DESC SIG_DESC_SET(SCUA4, 18)
+SIG_EXPR_DECL(ROMA4, ROM8, P19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA4, ROM16, P19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA4, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB2, VPO12, P19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB2, VPO24, P19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB2, VPOOFF1, P19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB2, SIG_EXPR_PTR(VPOB2, VPO12),
+ SIG_EXPR_PTR(VPOB2, VPO24), SIG_EXPR_PTR(VPOB2, VPOOFF1));
+MS_PIN_DECL(P19, GPIOZ2, ROMA4, VPOB2);
+
+#define P20 203
+#define P20_DESC SIG_DESC_SET(SCUA4, 19)
+SIG_EXPR_DECL(ROMA5, ROM8, P20_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA5, ROM16, P20_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA5, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB3, VPO12, P20_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB3, VPO24, P20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB3, VPOOFF1, P20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB3, SIG_EXPR_PTR(VPOB3, VPO12),
+ SIG_EXPR_PTR(VPOB3, VPO24), SIG_EXPR_PTR(VPOB3, VPOOFF1));
+MS_PIN_DECL(P20, GPIOZ3, ROMA5, VPOB3);
+
+#define P21 204
+#define P21_DESC SIG_DESC_SET(SCUA4, 20)
+SIG_EXPR_DECL(ROMA6, ROM8, P21_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA6, ROM16, P21_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA6, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB4, VPO12, P21_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB4, VPO24, P21_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB4, VPOOFF1, P21_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB4, SIG_EXPR_PTR(VPOB4, VPO12),
+ SIG_EXPR_PTR(VPOB4, VPO24), SIG_EXPR_PTR(VPOB4, VPOOFF1));
+MS_PIN_DECL(P21, GPIOZ4, ROMA6, VPOB4);
+
+#define P22 205
+#define P22_DESC SIG_DESC_SET(SCUA4, 21)
+SIG_EXPR_DECL(ROMA7, ROM8, P22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA7, ROM16, P22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA7, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB5, VPO12, P22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB5, VPO24, P22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB5, VPOOFF1, P22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB5, SIG_EXPR_PTR(VPOB5, VPO12),
+ SIG_EXPR_PTR(VPOB5, VPO24), SIG_EXPR_PTR(VPOB5, VPOOFF1));
+MS_PIN_DECL(P22, GPIOZ5, ROMA7, VPOB5);
+
+#define M19 206
+#define M19_DESC SIG_DESC_SET(SCUA4, 22)
+SIG_EXPR_DECL(ROMA8, ROM8, M19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA8, ROM16, M19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA8, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB6, VPO12, M19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB6, VPO24, M19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB6, VPOOFF1, M19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB6, SIG_EXPR_PTR(VPOB6, VPO12),
+ SIG_EXPR_PTR(VPOB6, VPO24), SIG_EXPR_PTR(VPOB6, VPOOFF1));
+MS_PIN_DECL(M19, GPIOZ6, ROMA8, VPOB6);
+
+#define M20 207
+#define M20_DESC SIG_DESC_SET(SCUA4, 23)
+SIG_EXPR_DECL(ROMA9, ROM8, M20_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA9, ROM16, M20_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA9, ROM8, ROM16);
+SIG_EXPR_DECL(VPOB7, VPO12, M20_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOB7, VPO24, M20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOB7, VPOOFF1, M20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOB7, SIG_EXPR_PTR(VPOB7, VPO12),
+ SIG_EXPR_PTR(VPOB7, VPO24), SIG_EXPR_PTR(VPOB7, VPOOFF1));
+MS_PIN_DECL(M20, GPIOZ7, ROMA9, VPOB7);
+
+#define M21 208
+#define M21_DESC SIG_DESC_SET(SCUA4, 24)
+SIG_EXPR_DECL(ROMA10, ROM8, M21_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA10, ROM16, M21_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA10, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG0, VPO12, M21_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG0, VPO24, M21_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG0, VPOOFF1, M21_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG0, SIG_EXPR_PTR(VPOG0, VPO12),
+ SIG_EXPR_PTR(VPOG0, VPO24), SIG_EXPR_PTR(VPOG0, VPOOFF1));
+MS_PIN_DECL(M21, GPIOAA0, ROMA10, VPOG0);
+
+#define M22 209
+#define M22_DESC SIG_DESC_SET(SCUA4, 25)
+SIG_EXPR_DECL(ROMA11, ROM8, M22_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA11, ROM16, M22_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA11, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG1, VPO12, M22_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG1, VPO24, M22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG1, VPOOFF1, M22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG1, SIG_EXPR_PTR(VPOG1, VPO12),
+ SIG_EXPR_PTR(VPOG1, VPO24), SIG_EXPR_PTR(VPOG1, VPOOFF1));
+MS_PIN_DECL(M22, GPIOAA1, ROMA11, VPOG1);
+
+#define L18 210
+#define L18_DESC SIG_DESC_SET(SCUA4, 26)
+SIG_EXPR_DECL(ROMA12, ROM8, L18_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA12, ROM16, L18_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA12, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG2, VPO12, L18_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG2, VPO24, L18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG2, VPOOFF1, L18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG2, SIG_EXPR_PTR(VPOG2, VPO12),
+ SIG_EXPR_PTR(VPOG2, VPO24), SIG_EXPR_PTR(VPOG2, VPOOFF1));
+MS_PIN_DECL(L18, GPIOAA2, ROMA12, VPOG2);
+
+#define L19 211
+#define L19_DESC SIG_DESC_SET(SCUA4, 27)
+SIG_EXPR_DECL(ROMA13, ROM8, L19_DESC, VPOOFF0_DESC);
+SIG_EXPR_DECL(ROMA13, ROM16, L19_DESC, VPOOFF0_DESC);
+SIG_EXPR_LIST_DECL_DUAL(ROMA13, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG3, VPO12, L19_DESC, VPO12_DESC);
+SIG_EXPR_DECL(VPOG3, VPO24, L19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG3, VPOOFF1, L19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL(VPOG3, SIG_EXPR_PTR(VPOG3, VPO12),
+ SIG_EXPR_PTR(VPOG3, VPO24), SIG_EXPR_PTR(VPOG3, VPOOFF1));
+MS_PIN_DECL(L19, GPIOAA3, ROMA13, VPOG3);
+
+#define L20 212
+#define L20_DESC SIG_DESC_SET(SCUA4, 28)
+SIG_EXPR_DECL(ROMA14, ROM8, L20_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA14, ROM16, L20_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA14, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG4, VPO24, L20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG4, VPOOFF1, L20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG4, VPO24, VPOOFF1);
+MS_PIN_DECL(L20, GPIOAA4, ROMA14, VPOG4);
+
+#define L21 213
+#define L21_DESC SIG_DESC_SET(SCUA4, 29)
+SIG_EXPR_DECL(ROMA15, ROM8, L21_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA15, ROM16, L21_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA15, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG5, VPO24, L21_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG5, VPOOFF1, L21_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG5, VPO24, VPOOFF1);
+MS_PIN_DECL(L21, GPIOAA5, ROMA15, VPOG5);
+
+#define T18 214
+#define T18_DESC SIG_DESC_SET(SCUA4, 30)
+SIG_EXPR_DECL(ROMA16, ROM8, T18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA16, ROM16, T18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA16, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG6, VPO24, T18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG6, VPOOFF1, T18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG6, VPO24, VPOOFF1);
+MS_PIN_DECL(T18, GPIOAA6, ROMA16, VPOG6);
+
+#define N18 215
+#define N18_DESC SIG_DESC_SET(SCUA4, 31)
+SIG_EXPR_DECL(ROMA17, ROM8, N18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA17, ROM16, N18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA17, ROM8, ROM16);
+SIG_EXPR_DECL(VPOG7, VPO24, N18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOG7, VPOOFF1, N18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOG7, VPO24, VPOOFF1);
+MS_PIN_DECL(N18, GPIOAA7, ROMA17, VPOG7);
+
+#define N19 216
+#define N19_DESC SIG_DESC_SET(SCUA8, 0)
+SIG_EXPR_DECL(ROMA18, ROM8, N19_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA18, ROM16, N19_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA18, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR0, VPO24, N19_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR0, VPOOFF1, N19_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR0, VPO24, VPOOFF1);
+MS_PIN_DECL(N19, GPIOAB0, ROMA18, VPOR0);
+
+#define M18 217
+#define M18_DESC SIG_DESC_SET(SCUA8, 1)
+SIG_EXPR_DECL(ROMA19, ROM8, M18_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA19, ROM16, M18_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA19, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR1, VPO24, M18_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR1, VPOOFF1, M18_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR1, VPO24, VPOOFF1);
+MS_PIN_DECL(M18, GPIOAB1, ROMA19, VPOR1);
+
+#define N22 218
+#define N22_DESC SIG_DESC_SET(SCUA8, 2)
+SIG_EXPR_DECL(ROMA20, ROM8, N22_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA20, ROM16, N22_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA20, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR2, VPO24, N22_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR2, VPOOFF1, N22_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR2, VPO24, VPOOFF1);
+MS_PIN_DECL(N22, GPIOAB2, ROMA20, VPOR2);
+
+#define N20 219
+#define N20_DESC SIG_DESC_SET(SCUA8, 3)
+SIG_EXPR_DECL(ROMA21, ROM8, N20_DESC, VPO_OFF_12);
+SIG_EXPR_DECL(ROMA21, ROM16, N20_DESC, VPO_OFF_12);
+SIG_EXPR_LIST_DECL_DUAL(ROMA21, ROM8, ROM16);
+SIG_EXPR_DECL(VPOR3, VPO24, N20_DESC, VPO24_DESC);
+SIG_EXPR_DECL(VPOR3, VPOOFF1, N20_DESC, VPOOFF1_DESC);
+SIG_EXPR_LIST_DECL_DUAL(VPOR3, VPO24, VPOOFF1);
+MS_PIN_DECL(N20, GPIOAB3, ROMA21, VPOR3);
+
+FUNC_GROUP_DECL(ROM8, V20, U21, T19, V22, U20, R18, N21, L22, K18, W21, Y22,
+ U19, R22, P18, P19, P20, P21, P22, M19, M20, M21, M22, L18,
+ L19, L20, L21, T18, N18, N19, M18, N22, N20);
+FUNC_GROUP_DECL(ROM16, V20, U21, T19, V22, U20, R18, N21, L22, K18,
+ A8, C7, B7, A7, D7, B6, A6, E7, W21, Y22, U19, R22, P18, P19,
+ P20, P21, P22, M19, M20, M21, M22, L18, L19, L20, L21, T18,
+ N18, N19, M18, N22, N20);
+FUNC_GROUP_DECL(VPO12, U21, T19, V22, U20, R22, P18, P19, P20, P21, P22, M19,
+ M20, M21, M22, L18, L19, L20, L21, T18, N18, N19, M18, N22,
+ N20);
+FUNC_GROUP_DECL(VPO24, U21, T19, V22, U20, L22, K18, V21, W22, R22, P18, P19,
+ P20, P21, P22, M19, M20, M21, M22, L18, L19);
+
 /* Note that we account for GPIOY4-GPIOY7 even though they're not valid, so
  * the 216 pins become 220.
  */
@@ -883,84 +1715,180 @@ FUNC_GROUP_DECL(RGMII1, A12, B12, C12, D12, E12, A13, E11, D11, C11, B11, A11,
static struct pinctrl_pin_desc aspeed_g4_pins[ASPEED_G4_NR_PINS] = {
ASPEED_PINCTRL_PIN(A1),
+ ASPEED_PINCTRL_PIN(A10),
ASPEED_PINCTRL_PIN(A11),
ASPEED_PINCTRL_PIN(A12),
ASPEED_PINCTRL_PIN(A13),
+ ASPEED_PINCTRL_PIN(A14),
ASPEED_PINCTRL_PIN(A15),
+ ASPEED_PINCTRL_PIN(A16),
+ ASPEED_PINCTRL_PIN(A17),
ASPEED_PINCTRL_PIN(A18),
+ ASPEED_PINCTRL_PIN(A19),
ASPEED_PINCTRL_PIN(A2),
+ ASPEED_PINCTRL_PIN(A20),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
ASPEED_PINCTRL_PIN(A5),
ASPEED_PINCTRL_PIN(A6),
ASPEED_PINCTRL_PIN(A7),
ASPEED_PINCTRL_PIN(A8),
+ ASPEED_PINCTRL_PIN(A9),
+ ASPEED_PINCTRL_PIN(AA1),
ASPEED_PINCTRL_PIN(AA2),
ASPEED_PINCTRL_PIN(AA22),
ASPEED_PINCTRL_PIN(AA3),
+ ASPEED_PINCTRL_PIN(AA4),
+ ASPEED_PINCTRL_PIN(AA5),
+ ASPEED_PINCTRL_PIN(AA6),
ASPEED_PINCTRL_PIN(AA7),
ASPEED_PINCTRL_PIN(AB1),
ASPEED_PINCTRL_PIN(AB2),
+ ASPEED_PINCTRL_PIN(AB3),
+ ASPEED_PINCTRL_PIN(AB4),
+ ASPEED_PINCTRL_PIN(AB5),
+ ASPEED_PINCTRL_PIN(AB6),
ASPEED_PINCTRL_PIN(AB7),
ASPEED_PINCTRL_PIN(B1),
+ ASPEED_PINCTRL_PIN(B10),
ASPEED_PINCTRL_PIN(B11),
ASPEED_PINCTRL_PIN(B12),
+ ASPEED_PINCTRL_PIN(B13),
ASPEED_PINCTRL_PIN(B14),
ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B16),
+ ASPEED_PINCTRL_PIN(B17),
+ ASPEED_PINCTRL_PIN(B18),
ASPEED_PINCTRL_PIN(B19),
ASPEED_PINCTRL_PIN(B2),
+ ASPEED_PINCTRL_PIN(B22),
ASPEED_PINCTRL_PIN(B3),
ASPEED_PINCTRL_PIN(B4),
+ ASPEED_PINCTRL_PIN(B5),
ASPEED_PINCTRL_PIN(B6),
ASPEED_PINCTRL_PIN(B7),
+ ASPEED_PINCTRL_PIN(B9),
ASPEED_PINCTRL_PIN(C1),
+ ASPEED_PINCTRL_PIN(C10),
ASPEED_PINCTRL_PIN(C11),
ASPEED_PINCTRL_PIN(C12),
+ ASPEED_PINCTRL_PIN(C13),
ASPEED_PINCTRL_PIN(C14),
ASPEED_PINCTRL_PIN(C15),
+ ASPEED_PINCTRL_PIN(C16),
ASPEED_PINCTRL_PIN(C17),
+ ASPEED_PINCTRL_PIN(C18),
ASPEED_PINCTRL_PIN(C2),
+ ASPEED_PINCTRL_PIN(C20),
+ ASPEED_PINCTRL_PIN(C21),
+ ASPEED_PINCTRL_PIN(C22),
ASPEED_PINCTRL_PIN(C3),
ASPEED_PINCTRL_PIN(C4),
ASPEED_PINCTRL_PIN(C5),
ASPEED_PINCTRL_PIN(C6),
ASPEED_PINCTRL_PIN(C7),
+ ASPEED_PINCTRL_PIN(C8),
+ ASPEED_PINCTRL_PIN(C9),
ASPEED_PINCTRL_PIN(D1),
+ ASPEED_PINCTRL_PIN(D10),
ASPEED_PINCTRL_PIN(D11),
ASPEED_PINCTRL_PIN(D12),
+ ASPEED_PINCTRL_PIN(D13),
ASPEED_PINCTRL_PIN(D14),
ASPEED_PINCTRL_PIN(D15),
ASPEED_PINCTRL_PIN(D16),
ASPEED_PINCTRL_PIN(D17),
ASPEED_PINCTRL_PIN(D18),
+ ASPEED_PINCTRL_PIN(D19),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D3),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
+ ASPEED_PINCTRL_PIN(D6),
ASPEED_PINCTRL_PIN(D7),
+ ASPEED_PINCTRL_PIN(D8),
+ ASPEED_PINCTRL_PIN(D9),
ASPEED_PINCTRL_PIN(E10),
ASPEED_PINCTRL_PIN(E11),
ASPEED_PINCTRL_PIN(E12),
+ ASPEED_PINCTRL_PIN(E13),
ASPEED_PINCTRL_PIN(E14),
+ ASPEED_PINCTRL_PIN(E15),
ASPEED_PINCTRL_PIN(E16),
+ ASPEED_PINCTRL_PIN(E18),
+ ASPEED_PINCTRL_PIN(E19),
ASPEED_PINCTRL_PIN(E2),
+ ASPEED_PINCTRL_PIN(E20),
ASPEED_PINCTRL_PIN(E3),
ASPEED_PINCTRL_PIN(E5),
+ ASPEED_PINCTRL_PIN(E6),
ASPEED_PINCTRL_PIN(E7),
+ ASPEED_PINCTRL_PIN(E8),
+ ASPEED_PINCTRL_PIN(E9),
+ ASPEED_PINCTRL_PIN(F18),
+ ASPEED_PINCTRL_PIN(F20),
ASPEED_PINCTRL_PIN(F3),
ASPEED_PINCTRL_PIN(F4),
ASPEED_PINCTRL_PIN(F5),
+ ASPEED_PINCTRL_PIN(G18),
+ ASPEED_PINCTRL_PIN(G19),
+ ASPEED_PINCTRL_PIN(G20),
ASPEED_PINCTRL_PIN(G5),
ASPEED_PINCTRL_PIN(H1),
+ ASPEED_PINCTRL_PIN(H18),
ASPEED_PINCTRL_PIN(H19),
ASPEED_PINCTRL_PIN(H2),
ASPEED_PINCTRL_PIN(H20),
+ ASPEED_PINCTRL_PIN(H3),
+ ASPEED_PINCTRL_PIN(H4),
+ ASPEED_PINCTRL_PIN(J20),
+ ASPEED_PINCTRL_PIN(J21),
ASPEED_PINCTRL_PIN(J3),
+ ASPEED_PINCTRL_PIN(J4),
+ ASPEED_PINCTRL_PIN(J5),
ASPEED_PINCTRL_PIN(K18),
+ ASPEED_PINCTRL_PIN(K20),
+ ASPEED_PINCTRL_PIN(K5),
+ ASPEED_PINCTRL_PIN(L1),
+ ASPEED_PINCTRL_PIN(L18),
+ ASPEED_PINCTRL_PIN(L19),
+ ASPEED_PINCTRL_PIN(L2),
+ ASPEED_PINCTRL_PIN(L20),
+ ASPEED_PINCTRL_PIN(L21),
ASPEED_PINCTRL_PIN(L22),
+ ASPEED_PINCTRL_PIN(L3),
+ ASPEED_PINCTRL_PIN(L4),
+ ASPEED_PINCTRL_PIN(L5),
+ ASPEED_PINCTRL_PIN(M1),
+ ASPEED_PINCTRL_PIN(M18),
+ ASPEED_PINCTRL_PIN(M19),
+ ASPEED_PINCTRL_PIN(M2),
+ ASPEED_PINCTRL_PIN(M20),
+ ASPEED_PINCTRL_PIN(M21),
+ ASPEED_PINCTRL_PIN(M22),
+ ASPEED_PINCTRL_PIN(M3),
+ ASPEED_PINCTRL_PIN(M4),
+ ASPEED_PINCTRL_PIN(M5),
+ ASPEED_PINCTRL_PIN(N1),
+ ASPEED_PINCTRL_PIN(N18),
+ ASPEED_PINCTRL_PIN(N19),
+ ASPEED_PINCTRL_PIN(N2),
+ ASPEED_PINCTRL_PIN(N20),
ASPEED_PINCTRL_PIN(N21),
+ ASPEED_PINCTRL_PIN(N22),
+ ASPEED_PINCTRL_PIN(N3),
+ ASPEED_PINCTRL_PIN(N4),
+ ASPEED_PINCTRL_PIN(N5),
+ ASPEED_PINCTRL_PIN(P18),
+ ASPEED_PINCTRL_PIN(P19),
+ ASPEED_PINCTRL_PIN(P20),
+ ASPEED_PINCTRL_PIN(P21),
+ ASPEED_PINCTRL_PIN(P22),
+ ASPEED_PINCTRL_PIN(P5),
ASPEED_PINCTRL_PIN(R18),
+ ASPEED_PINCTRL_PIN(R22),
ASPEED_PINCTRL_PIN(T1),
+ ASPEED_PINCTRL_PIN(T18),
ASPEED_PINCTRL_PIN(T19),
ASPEED_PINCTRL_PIN(T2),
ASPEED_PINCTRL_PIN(T4),
@@ -979,28 +1907,61 @@ static struct pinctrl_pin_desc aspeed_g4_pins[ASPEED_G4_NR_PINS] = {
ASPEED_PINCTRL_PIN(V20),
ASPEED_PINCTRL_PIN(V21),
ASPEED_PINCTRL_PIN(V22),
+ ASPEED_PINCTRL_PIN(V3),
+ ASPEED_PINCTRL_PIN(V4),
+ ASPEED_PINCTRL_PIN(V5),
ASPEED_PINCTRL_PIN(V6),
+ ASPEED_PINCTRL_PIN(V7),
ASPEED_PINCTRL_PIN(W1),
+ ASPEED_PINCTRL_PIN(W2),
ASPEED_PINCTRL_PIN(W21),
ASPEED_PINCTRL_PIN(W22),
+ ASPEED_PINCTRL_PIN(W3),
ASPEED_PINCTRL_PIN(W4),
ASPEED_PINCTRL_PIN(W5),
+ ASPEED_PINCTRL_PIN(W6),
+ ASPEED_PINCTRL_PIN(W7),
+ ASPEED_PINCTRL_PIN(Y1),
+ ASPEED_PINCTRL_PIN(Y2),
+ ASPEED_PINCTRL_PIN(Y21),
ASPEED_PINCTRL_PIN(Y22),
ASPEED_PINCTRL_PIN(Y3),
ASPEED_PINCTRL_PIN(Y4),
ASPEED_PINCTRL_PIN(Y5),
+ ASPEED_PINCTRL_PIN(Y6),
ASPEED_PINCTRL_PIN(Y7),
};
static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(ACPI),
+ ASPEED_PINCTRL_GROUP(ADC0),
+ ASPEED_PINCTRL_GROUP(ADC1),
+ ASPEED_PINCTRL_GROUP(ADC10),
+ ASPEED_PINCTRL_GROUP(ADC11),
+ ASPEED_PINCTRL_GROUP(ADC12),
+ ASPEED_PINCTRL_GROUP(ADC13),
+ ASPEED_PINCTRL_GROUP(ADC14),
+ ASPEED_PINCTRL_GROUP(ADC15),
+ ASPEED_PINCTRL_GROUP(ADC2),
+ ASPEED_PINCTRL_GROUP(ADC3),
+ ASPEED_PINCTRL_GROUP(ADC4),
+ ASPEED_PINCTRL_GROUP(ADC5),
+ ASPEED_PINCTRL_GROUP(ADC6),
+ ASPEED_PINCTRL_GROUP(ADC7),
+ ASPEED_PINCTRL_GROUP(ADC8),
+ ASPEED_PINCTRL_GROUP(ADC9),
ASPEED_PINCTRL_GROUP(BMCINT),
ASPEED_PINCTRL_GROUP(DDCCLK),
ASPEED_PINCTRL_GROUP(DDCDAT),
+ ASPEED_PINCTRL_GROUP(EXTRST),
ASPEED_PINCTRL_GROUP(FLACK),
ASPEED_PINCTRL_GROUP(FLBUSY),
ASPEED_PINCTRL_GROUP(FLWP),
+ ASPEED_PINCTRL_GROUP(GPID),
ASPEED_PINCTRL_GROUP(GPID0),
+ ASPEED_PINCTRL_GROUP(GPID2),
+ ASPEED_PINCTRL_GROUP(GPID4),
+ ASPEED_PINCTRL_GROUP(GPID6),
ASPEED_PINCTRL_GROUP(GPIE0),
ASPEED_PINCTRL_GROUP(GPIE2),
ASPEED_PINCTRL_GROUP(GPIE4),
@@ -1009,6 +1970,7 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(I2C11),
ASPEED_PINCTRL_GROUP(I2C12),
ASPEED_PINCTRL_GROUP(I2C13),
+ ASPEED_PINCTRL_GROUP(I2C14),
ASPEED_PINCTRL_GROUP(I2C3),
ASPEED_PINCTRL_GROUP(I2C4),
ASPEED_PINCTRL_GROUP(I2C5),
@@ -1018,25 +1980,37 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(I2C9),
ASPEED_PINCTRL_GROUP(LPCPD),
ASPEED_PINCTRL_GROUP(LPCPME),
- ASPEED_PINCTRL_GROUP(LPCPME),
+ ASPEED_PINCTRL_GROUP(LPCRST),
ASPEED_PINCTRL_GROUP(LPCSMI),
+ ASPEED_PINCTRL_GROUP(MAC1LINK),
+ ASPEED_PINCTRL_GROUP(MAC2LINK),
ASPEED_PINCTRL_GROUP(MDIO1),
ASPEED_PINCTRL_GROUP(MDIO2),
ASPEED_PINCTRL_GROUP(NCTS1),
+ ASPEED_PINCTRL_GROUP(NCTS2),
ASPEED_PINCTRL_GROUP(NCTS3),
ASPEED_PINCTRL_GROUP(NCTS4),
ASPEED_PINCTRL_GROUP(NDCD1),
+ ASPEED_PINCTRL_GROUP(NDCD2),
ASPEED_PINCTRL_GROUP(NDCD3),
ASPEED_PINCTRL_GROUP(NDCD4),
ASPEED_PINCTRL_GROUP(NDSR1),
+ ASPEED_PINCTRL_GROUP(NDSR2),
ASPEED_PINCTRL_GROUP(NDSR3),
+ ASPEED_PINCTRL_GROUP(NDSR4),
ASPEED_PINCTRL_GROUP(NDTR1),
+ ASPEED_PINCTRL_GROUP(NDTR2),
ASPEED_PINCTRL_GROUP(NDTR3),
+ ASPEED_PINCTRL_GROUP(NDTR4),
+ ASPEED_PINCTRL_GROUP(NDTS4),
ASPEED_PINCTRL_GROUP(NRI1),
+ ASPEED_PINCTRL_GROUP(NRI2),
ASPEED_PINCTRL_GROUP(NRI3),
ASPEED_PINCTRL_GROUP(NRI4),
ASPEED_PINCTRL_GROUP(NRTS1),
+ ASPEED_PINCTRL_GROUP(NRTS2),
ASPEED_PINCTRL_GROUP(NRTS3),
+ ASPEED_PINCTRL_GROUP(OSCCLK),
ASPEED_PINCTRL_GROUP(PWM0),
ASPEED_PINCTRL_GROUP(PWM1),
ASPEED_PINCTRL_GROUP(PWM2),
@@ -1046,7 +2020,9 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(PWM6),
ASPEED_PINCTRL_GROUP(PWM7),
ASPEED_PINCTRL_GROUP(RGMII1),
+ ASPEED_PINCTRL_GROUP(RGMII2),
ASPEED_PINCTRL_GROUP(RMII1),
+ ASPEED_PINCTRL_GROUP(RMII2),
ASPEED_PINCTRL_GROUP(ROM16),
ASPEED_PINCTRL_GROUP(ROM8),
ASPEED_PINCTRL_GROUP(ROMCS1),
@@ -1054,21 +2030,48 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(ROMCS3),
ASPEED_PINCTRL_GROUP(ROMCS4),
ASPEED_PINCTRL_GROUP(RXD1),
+ ASPEED_PINCTRL_GROUP(RXD2),
ASPEED_PINCTRL_GROUP(RXD3),
ASPEED_PINCTRL_GROUP(RXD4),
+ ASPEED_PINCTRL_GROUP(SALT1),
+ ASPEED_PINCTRL_GROUP(SALT2),
+ ASPEED_PINCTRL_GROUP(SALT3),
+ ASPEED_PINCTRL_GROUP(SALT4),
ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SD2),
+ ASPEED_PINCTRL_GROUP(SGPMCK),
ASPEED_PINCTRL_GROUP(SGPMI),
+ ASPEED_PINCTRL_GROUP(SGPMLD),
+ ASPEED_PINCTRL_GROUP(SGPMO),
+ ASPEED_PINCTRL_GROUP(SGPSCK),
+ ASPEED_PINCTRL_GROUP(SGPSI0),
+ ASPEED_PINCTRL_GROUP(SGPSI1),
+ ASPEED_PINCTRL_GROUP(SGPSLD),
+ ASPEED_PINCTRL_GROUP(SIOONCTRL),
ASPEED_PINCTRL_GROUP(SIOPBI),
ASPEED_PINCTRL_GROUP(SIOPBO),
+ ASPEED_PINCTRL_GROUP(SIOPWREQ),
+ ASPEED_PINCTRL_GROUP(SIOPWRGD),
+ ASPEED_PINCTRL_GROUP(SIOS3),
+ ASPEED_PINCTRL_GROUP(SIOS5),
+ ASPEED_PINCTRL_GROUP(SIOSCI),
+ ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+ ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
+ ASPEED_PINCTRL_GROUP(SPICS1),
ASPEED_PINCTRL_GROUP(TIMER3),
+ ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
ASPEED_PINCTRL_GROUP(TXD1),
+ ASPEED_PINCTRL_GROUP(TXD2),
ASPEED_PINCTRL_GROUP(TXD3),
ASPEED_PINCTRL_GROUP(TXD4),
ASPEED_PINCTRL_GROUP(UART6),
+ ASPEED_PINCTRL_GROUP(USBCKI),
+ ASPEED_PINCTRL_GROUP(VGABIOS_ROM),
ASPEED_PINCTRL_GROUP(VGAHS),
ASPEED_PINCTRL_GROUP(VGAVS),
ASPEED_PINCTRL_GROUP(VPI18),
@@ -1076,17 +2079,40 @@ static const struct aspeed_pin_group aspeed_g4_groups[] = {
ASPEED_PINCTRL_GROUP(VPI30),
ASPEED_PINCTRL_GROUP(VPO12),
ASPEED_PINCTRL_GROUP(VPO24),
+ ASPEED_PINCTRL_GROUP(WDTRST1),
+ ASPEED_PINCTRL_GROUP(WDTRST2),
};
static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(ACPI),
+ ASPEED_PINCTRL_FUNC(ADC0),
+ ASPEED_PINCTRL_FUNC(ADC1),
+ ASPEED_PINCTRL_FUNC(ADC10),
+ ASPEED_PINCTRL_FUNC(ADC11),
+ ASPEED_PINCTRL_FUNC(ADC12),
+ ASPEED_PINCTRL_FUNC(ADC13),
+ ASPEED_PINCTRL_FUNC(ADC14),
+ ASPEED_PINCTRL_FUNC(ADC15),
+ ASPEED_PINCTRL_FUNC(ADC2),
+ ASPEED_PINCTRL_FUNC(ADC3),
+ ASPEED_PINCTRL_FUNC(ADC4),
+ ASPEED_PINCTRL_FUNC(ADC5),
+ ASPEED_PINCTRL_FUNC(ADC6),
+ ASPEED_PINCTRL_FUNC(ADC7),
+ ASPEED_PINCTRL_FUNC(ADC8),
+ ASPEED_PINCTRL_FUNC(ADC9),
ASPEED_PINCTRL_FUNC(BMCINT),
ASPEED_PINCTRL_FUNC(DDCCLK),
ASPEED_PINCTRL_FUNC(DDCDAT),
+ ASPEED_PINCTRL_FUNC(EXTRST),
ASPEED_PINCTRL_FUNC(FLACK),
ASPEED_PINCTRL_FUNC(FLBUSY),
ASPEED_PINCTRL_FUNC(FLWP),
+ ASPEED_PINCTRL_FUNC(GPID),
ASPEED_PINCTRL_FUNC(GPID0),
+ ASPEED_PINCTRL_FUNC(GPID2),
+ ASPEED_PINCTRL_FUNC(GPID4),
+ ASPEED_PINCTRL_FUNC(GPID6),
ASPEED_PINCTRL_FUNC(GPIE0),
ASPEED_PINCTRL_FUNC(GPIE2),
ASPEED_PINCTRL_FUNC(GPIE4),
@@ -1095,6 +2121,7 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(I2C11),
ASPEED_PINCTRL_FUNC(I2C12),
ASPEED_PINCTRL_FUNC(I2C13),
+ ASPEED_PINCTRL_FUNC(I2C14),
ASPEED_PINCTRL_FUNC(I2C3),
ASPEED_PINCTRL_FUNC(I2C4),
ASPEED_PINCTRL_FUNC(I2C5),
@@ -1104,24 +2131,37 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(I2C9),
ASPEED_PINCTRL_FUNC(LPCPD),
ASPEED_PINCTRL_FUNC(LPCPME),
+ ASPEED_PINCTRL_FUNC(LPCRST),
ASPEED_PINCTRL_FUNC(LPCSMI),
+ ASPEED_PINCTRL_FUNC(MAC1LINK),
+ ASPEED_PINCTRL_FUNC(MAC2LINK),
ASPEED_PINCTRL_FUNC(MDIO1),
ASPEED_PINCTRL_FUNC(MDIO2),
ASPEED_PINCTRL_FUNC(NCTS1),
+ ASPEED_PINCTRL_FUNC(NCTS2),
ASPEED_PINCTRL_FUNC(NCTS3),
ASPEED_PINCTRL_FUNC(NCTS4),
ASPEED_PINCTRL_FUNC(NDCD1),
+ ASPEED_PINCTRL_FUNC(NDCD2),
ASPEED_PINCTRL_FUNC(NDCD3),
ASPEED_PINCTRL_FUNC(NDCD4),
ASPEED_PINCTRL_FUNC(NDSR1),
+ ASPEED_PINCTRL_FUNC(NDSR2),
ASPEED_PINCTRL_FUNC(NDSR3),
+ ASPEED_PINCTRL_FUNC(NDSR4),
ASPEED_PINCTRL_FUNC(NDTR1),
+ ASPEED_PINCTRL_FUNC(NDTR2),
ASPEED_PINCTRL_FUNC(NDTR3),
+ ASPEED_PINCTRL_FUNC(NDTR4),
+ ASPEED_PINCTRL_FUNC(NDTS4),
ASPEED_PINCTRL_FUNC(NRI1),
+ ASPEED_PINCTRL_FUNC(NRI2),
ASPEED_PINCTRL_FUNC(NRI3),
ASPEED_PINCTRL_FUNC(NRI4),
ASPEED_PINCTRL_FUNC(NRTS1),
+ ASPEED_PINCTRL_FUNC(NRTS2),
ASPEED_PINCTRL_FUNC(NRTS3),
+ ASPEED_PINCTRL_FUNC(OSCCLK),
ASPEED_PINCTRL_FUNC(PWM0),
ASPEED_PINCTRL_FUNC(PWM1),
ASPEED_PINCTRL_FUNC(PWM2),
@@ -1131,7 +2171,9 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(PWM6),
ASPEED_PINCTRL_FUNC(PWM7),
ASPEED_PINCTRL_FUNC(RGMII1),
+ ASPEED_PINCTRL_FUNC(RGMII2),
ASPEED_PINCTRL_FUNC(RMII1),
+ ASPEED_PINCTRL_FUNC(RMII2),
ASPEED_PINCTRL_FUNC(ROM16),
ASPEED_PINCTRL_FUNC(ROM8),
ASPEED_PINCTRL_FUNC(ROMCS1),
@@ -1139,21 +2181,48 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(ROMCS3),
ASPEED_PINCTRL_FUNC(ROMCS4),
ASPEED_PINCTRL_FUNC(RXD1),
+ ASPEED_PINCTRL_FUNC(RXD2),
ASPEED_PINCTRL_FUNC(RXD3),
ASPEED_PINCTRL_FUNC(RXD4),
+ ASPEED_PINCTRL_FUNC(SALT1),
+ ASPEED_PINCTRL_FUNC(SALT2),
+ ASPEED_PINCTRL_FUNC(SALT3),
+ ASPEED_PINCTRL_FUNC(SALT4),
ASPEED_PINCTRL_FUNC(SD1),
+ ASPEED_PINCTRL_FUNC(SD2),
+ ASPEED_PINCTRL_FUNC(SGPMCK),
ASPEED_PINCTRL_FUNC(SGPMI),
+ ASPEED_PINCTRL_FUNC(SGPMLD),
+ ASPEED_PINCTRL_FUNC(SGPMO),
+ ASPEED_PINCTRL_FUNC(SGPSCK),
+ ASPEED_PINCTRL_FUNC(SGPSI0),
+ ASPEED_PINCTRL_FUNC(SGPSI1),
+ ASPEED_PINCTRL_FUNC(SGPSLD),
+ ASPEED_PINCTRL_FUNC(SIOONCTRL),
ASPEED_PINCTRL_FUNC(SIOPBI),
ASPEED_PINCTRL_FUNC(SIOPBO),
+ ASPEED_PINCTRL_FUNC(SIOPWREQ),
+ ASPEED_PINCTRL_FUNC(SIOPWRGD),
+ ASPEED_PINCTRL_FUNC(SIOS3),
+ ASPEED_PINCTRL_FUNC(SIOS5),
+ ASPEED_PINCTRL_FUNC(SIOSCI),
+ ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+ ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
+ ASPEED_PINCTRL_FUNC(SPICS1),
ASPEED_PINCTRL_FUNC(TIMER3),
+ ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
ASPEED_PINCTRL_FUNC(TXD1),
+ ASPEED_PINCTRL_FUNC(TXD2),
ASPEED_PINCTRL_FUNC(TXD3),
ASPEED_PINCTRL_FUNC(TXD4),
ASPEED_PINCTRL_FUNC(UART6),
+ ASPEED_PINCTRL_FUNC(USBCKI),
+ ASPEED_PINCTRL_FUNC(VGABIOS_ROM),
ASPEED_PINCTRL_FUNC(VGAHS),
ASPEED_PINCTRL_FUNC(VGAVS),
ASPEED_PINCTRL_FUNC(VPI18),
@@ -1161,6 +2230,8 @@ static const struct aspeed_pin_function aspeed_g4_functions[] = {
ASPEED_PINCTRL_FUNC(VPI30),
ASPEED_PINCTRL_FUNC(VPO12),
ASPEED_PINCTRL_FUNC(VPO24),
+ ASPEED_PINCTRL_FUNC(WDTRST1),
+ ASPEED_PINCTRL_FUNC(WDTRST2),
};
static struct aspeed_pinctrl_data aspeed_g4_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 87b46390b695..43221a3c7e23 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -24,14 +25,28 @@
#include "../pinctrl-utils.h"
#include "pinctrl-aspeed.h"
-#define ASPEED_G5_NR_PINS 228
+#define ASPEED_G5_NR_PINS 232
-#define COND1 { SCU90, BIT(6), 0, 0 }
-#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
+#define COND1 { ASPEED_IP_SCU, SCU90, BIT(6), 0, 0 }
+#define COND2 { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
+
+/* LHCR0 is offset from the end of the H8S/2168-compatible registers */
+#define LHCR0 0x20
+#define GFX064 0x64
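+/*
+ * GFX064 is a register in the GFX block, accessed through ASPEED_IP_GFX;
+ * its bits [7:6] hold the CRT DVO enable/edge-mode state tested by the
+ * CRT_DVO_*_DESC descriptors further down.
+ */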
#define B14 0
SSSF_PIN_DECL(B14, GPIOA0, MAC1LINK, SIG_DESC_SET(SCU80, 0));
+#define D14 1
+SSSF_PIN_DECL(D14, GPIOA1, MAC2LINK, SIG_DESC_SET(SCU80, 1));
+
+#define D13 2
+SIG_EXPR_LIST_DECL_SINGLE(SPI1CS1, SPI1CS1, SIG_DESC_SET(SCU80, 15));
+SIG_EXPR_LIST_DECL_SINGLE(TIMER3, TIMER3, SIG_DESC_SET(SCU80, 2));
+MS_PIN_DECL(D13, GPIOA2, SPI1CS1, TIMER3);
+FUNC_GROUP_DECL(SPI1CS1, D13);
+FUNC_GROUP_DECL(TIMER3, D13);
+
#define E13 3
SSSF_PIN_DECL(E13, GPIOA3, TIMER4, SIG_DESC_SET(SCU80, 3));
@@ -71,6 +86,32 @@ FUNC_GROUP_DECL(TIMER8, B13);
FUNC_GROUP_DECL(MDIO2, C13, B13);
+#define K19 8
+GPIO_PIN_DECL(K19, GPIOB0);
+
+#define L19 9
+GPIO_PIN_DECL(L19, GPIOB1);
+
+#define L18 10
+GPIO_PIN_DECL(L18, GPIOB2);
+
+#define K18 11
+GPIO_PIN_DECL(K18, GPIOB3);
+
+#define J20 12
+SSSF_PIN_DECL(J20, GPIOB4, USBCKI, SIG_DESC_SET(HW_STRAP1, 23));
+
+#define H21 13
+#define H21_DESC SIG_DESC_SET(SCU80, 13)
+SIG_EXPR_LIST_DECL_SINGLE(LPCPD, LPCPD, H21_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LPCSMI, LPCSMI, H21_DESC);
+MS_PIN_DECL(H21, GPIOB5, LPCPD, LPCSMI);
+FUNC_GROUP_DECL(LPCPD, H21);
+FUNC_GROUP_DECL(LPCSMI, H21);
+
+#define H22 14
+SSSF_PIN_DECL(H22, GPIOB6, LPCPME, SIG_DESC_SET(SCU80, 14));
+
#define H20 15
GPIO_PIN_DECL(H20, GPIOB7);
@@ -167,7 +208,44 @@ MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
FUNC_GROUP_DECL(GPID2, F20, D20);
-#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
+#define GPID4_DESC SIG_DESC_SET(SCU8C, 10)
+
+#define D21 28
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT2, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4IN, GPID4, GPID);
+MS_PIN_DECL(D21, GPIOD4, SD2DAT2, GPID4IN);
+
+#define E20 29
+SIG_EXPR_LIST_DECL_SINGLE(SD2DAT3, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID4, GPID4_DESC);
+SIG_EXPR_DECL(GPID4OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID4OUT, GPID4, GPID);
+MS_PIN_DECL(E20, GPIOD5, SD2DAT3, GPID4OUT);
+
+FUNC_GROUP_DECL(GPID4, D21, E20);
+
+#define GPID6_DESC SIG_DESC_SET(SCU8C, 11)
+
+#define G18 30
+SIG_EXPR_LIST_DECL_SINGLE(SD2CD, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6IN, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6IN, GPID6, GPID);
+MS_PIN_DECL(G18, GPIOD6, SD2CD, GPID6IN);
+
+#define C21 31
+SIG_EXPR_LIST_DECL_SINGLE(SD2WP, SD2, SD2_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID6, GPID6_DESC);
+SIG_EXPR_DECL(GPID6OUT, GPID, GPID_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPID6OUT, GPID6, GPID);
+MS_PIN_DECL(C21, GPIOD7, SD2WP, GPID6OUT);
+
+FUNC_GROUP_DECL(GPID6, G18, C21);
+FUNC_GROUP_DECL(SD2, F19, E21, F20, D20, D21, E20, G18, C21);
+
+#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 22)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
#define B20 32
@@ -176,6 +254,7 @@ SIG_EXPR_DECL(GPIE0IN, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0IN, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0IN, GPIE0, GPIE);
MS_PIN_DECL(B20, GPIOE0, NCTS3, GPIE0IN);
+FUNC_GROUP_DECL(NCTS3, B20);
#define C20 33
SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
@@ -183,12 +262,233 @@ SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
+FUNC_GROUP_DECL(NDCD3, C20);
FUNC_GROUP_DECL(GPIE0, B20, C20);
-#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
-#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
-#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+#define GPIE2_DESC SIG_DESC_SET(SCU8C, 13)
+
+#define F18 34
+SIG_EXPR_LIST_DECL_SINGLE(NDSR3, NDSR3, SIG_DESC_SET(SCU80, 18));
+SIG_EXPR_DECL(GPIE2IN, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL(GPIE2IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE2IN, GPIE2, GPIE);
+MS_PIN_DECL(F18, GPIOE2, NDSR3, GPIE2IN);
+FUNC_GROUP_DECL(NDSR3, F18);
+
+#define F17 35
+SIG_EXPR_LIST_DECL_SINGLE(NRI3, NRI3, SIG_DESC_SET(SCU80, 19));
+SIG_EXPR_DECL(GPIE2OUT, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL(GPIE2OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE2OUT, GPIE2, GPIE);
+MS_PIN_DECL(F17, GPIOE3, NRI3, GPIE2OUT);
+FUNC_GROUP_DECL(NRI3, F17);
+
+FUNC_GROUP_DECL(GPIE2, F18, F17);
+
+#define GPIE4_DESC SIG_DESC_SET(SCU8C, 14)
+
+#define E18 36
+SIG_EXPR_LIST_DECL_SINGLE(NDTR3, NDTR3, SIG_DESC_SET(SCU80, 20));
+SIG_EXPR_DECL(GPIE4IN, GPIE4, GPIE4_DESC);
+SIG_EXPR_DECL(GPIE4IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE4IN, GPIE4, GPIE);
+MS_PIN_DECL(E18, GPIOE4, NDTR3, GPIE4IN);
+FUNC_GROUP_DECL(NDTR3, E18);
+
+#define D19 37
+SIG_EXPR_LIST_DECL_SINGLE(NRTS3, NRTS3, SIG_DESC_SET(SCU80, 21));
+SIG_EXPR_DECL(GPIE4OUT, GPIE4, GPIE4_DESC);
+SIG_EXPR_DECL(GPIE4OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE4OUT, GPIE4, GPIE);
+MS_PIN_DECL(D19, GPIOE5, NRTS3, GPIE4OUT);
+FUNC_GROUP_DECL(NRTS3, D19);
+
+FUNC_GROUP_DECL(GPIE4, E18, D19);
+
+#define GPIE6_DESC SIG_DESC_SET(SCU8C, 15)
+
+#define A20 38
+SIG_EXPR_LIST_DECL_SINGLE(TXD3, TXD3, SIG_DESC_SET(SCU80, 22));
+SIG_EXPR_DECL(GPIE6IN, GPIE6, GPIE6_DESC);
+SIG_EXPR_DECL(GPIE6IN, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE6IN, GPIE6, GPIE);
+MS_PIN_DECL(A20, GPIOE6, TXD3, GPIE6IN);
+FUNC_GROUP_DECL(TXD3, A20);
+
+#define B19 39
+SIG_EXPR_LIST_DECL_SINGLE(RXD3, RXD3, SIG_DESC_SET(SCU80, 23));
+SIG_EXPR_DECL(GPIE6OUT, GPIE6, GPIE6_DESC);
+SIG_EXPR_DECL(GPIE6OUT, GPIE, GPIE_DESC);
+SIG_EXPR_LIST_DECL_DUAL(GPIE6OUT, GPIE6, GPIE);
+MS_PIN_DECL(B19, GPIOE7, RXD3, GPIE6OUT);
+FUNC_GROUP_DECL(RXD3, B19);
+
+FUNC_GROUP_DECL(GPIE6, A20, B19);
+
+#define LPCHC_DESC SIG_DESC_IP_SET(ASPEED_IP_LPC, LHCR0, 0)
+#define LPCPLUS_DESC SIG_DESC_SET(SCU90, 30)
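+/*
+ * The LPC host controller signals are gated by LHCR0[0] in the LPC block
+ * itself (ASPEED_IP_LPC) rather than by an SCU bit; the LPCPLUS variant is
+ * selected by SCU90[30].
+ */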
+
+#define J19 40
+SIG_EXPR_DECL(LHAD0, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD0, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD0, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NCTS4, NCTS4, SIG_DESC_SET(SCU80, 24));
+MS_PIN_DECL(J19, GPIOF0, LHAD0, NCTS4);
+FUNC_GROUP_DECL(NCTS4, J19);
+
+#define J18 41
+SIG_EXPR_DECL(LHAD1, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD1, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD1, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD4, NDCD4, SIG_DESC_SET(SCU80, 25));
+MS_PIN_DECL(J18, GPIOF1, LHAD1, NDCD4);
+FUNC_GROUP_DECL(NDCD4, J18);
+
+#define B22 42
+SIG_EXPR_DECL(LHAD2, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD2, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD2, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR4, NDSR4, SIG_DESC_SET(SCU80, 26));
+MS_PIN_DECL(B22, GPIOF2, LHAD2, NDSR4);
+FUNC_GROUP_DECL(NDSR4, B22);
+
+#define B21 43
+SIG_EXPR_DECL(LHAD3, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHAD3, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHAD3, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NRI4, NRI4, SIG_DESC_SET(SCU80, 27));
+MS_PIN_DECL(B21, GPIOF3, LHAD3, NRI4);
+FUNC_GROUP_DECL(NRI4, B21);
+
+#define A21 44
+SIG_EXPR_DECL(LHCLK, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHCLK, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHCLK, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR4, NDTR4, SIG_DESC_SET(SCU80, 28));
+MS_PIN_DECL(A21, GPIOF4, LHCLK, NDTR4);
+FUNC_GROUP_DECL(NDTR4, A21);
+
+#define H19 45
+SIG_EXPR_DECL(LHFRAME, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHFRAME, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHFRAME, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS4, NRTS4, SIG_DESC_SET(SCU80, 29));
+MS_PIN_DECL(H19, GPIOF5, LHFRAME, NRTS4);
+FUNC_GROUP_DECL(NRTS4, H19);
+
+#define G17 46
+SIG_EXPR_LIST_DECL_SINGLE(LHSIRQ, LPCHC, LPCHC_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TXD4, TXD4, SIG_DESC_SET(SCU80, 30));
+MS_PIN_DECL(G17, GPIOF6, LHSIRQ, TXD4);
+FUNC_GROUP_DECL(TXD4, G17);
+
+#define H18 47
+SIG_EXPR_DECL(LHRST, LPCHC, LPCHC_DESC);
+SIG_EXPR_DECL(LHRST, LPCPLUS, LPCPLUS_DESC);
+SIG_EXPR_LIST_DECL_DUAL(LHRST, LPCHC, LPCPLUS);
+SIG_EXPR_LIST_DECL_SINGLE(RXD4, RXD4, SIG_DESC_SET(SCU80, 31));
+MS_PIN_DECL(H18, GPIOF7, LHRST, RXD4);
+FUNC_GROUP_DECL(RXD4, H18);
+
+FUNC_GROUP_DECL(LPCHC, J19, J18, B22, B21, A21, H19, G17, H18);
+FUNC_GROUP_DECL(LPCPLUS, J19, J18, B22, B21, A21, H19, H18);
+
+#define A19 48
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1CK, SGPS1, COND1, SIG_DESC_SET(SCU84, 0));
+SS_PIN_DECL(A19, GPIOG0, SGPS1CK);
+
+#define E19 49
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1LD, SGPS1, COND1, SIG_DESC_SET(SCU84, 1));
+SS_PIN_DECL(E19, GPIOG1, SGPS1LD);
+
+#define C19 50
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1I0, SGPS1, COND1, SIG_DESC_SET(SCU84, 2));
+SS_PIN_DECL(C19, GPIOG2, SGPS1I0);
+
+#define E16 51
+SIG_EXPR_LIST_DECL_SINGLE(SGPS1I1, SGPS1, COND1, SIG_DESC_SET(SCU84, 3));
+SS_PIN_DECL(E16, GPIOG3, SGPS1I1);
+
+FUNC_GROUP_DECL(SGPS1, A19, E19, C19, E16);
+
+#define SGPS2_DESC SIG_DESC_SET(SCU94, 12)
+
+#define E17 52
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2CK, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT1, SALT1, COND1, SIG_DESC_SET(SCU84, 4));
+MS_PIN_DECL(E17, GPIOG4, SGPS2CK, SALT1);
+FUNC_GROUP_DECL(SALT1, E17);
+
+#define D16 53
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2LD, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT2, SALT2, COND1, SIG_DESC_SET(SCU84, 5));
+MS_PIN_DECL(D16, GPIOG5, SGPS2LD, SALT2);
+FUNC_GROUP_DECL(SALT2, D16);
+
+#define D15 54
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2I0, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT3, SALT3, COND1, SIG_DESC_SET(SCU84, 6));
+MS_PIN_DECL(D15, GPIOG6, SGPS2I0, SALT3);
+FUNC_GROUP_DECL(SALT3, D15);
+
+#define E14 55
+SIG_EXPR_LIST_DECL_SINGLE(SGPS2I1, SGPS2, COND1, SGPS2_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(SALT4, SALT4, COND1, SIG_DESC_SET(SCU84, 7));
+MS_PIN_DECL(E14, GPIOG7, SGPS2I1, SALT4);
+FUNC_GROUP_DECL(SALT4, E14);
+
+FUNC_GROUP_DECL(SGPS2, E17, D16, D15, E14);
+
+#define UART6_DESC SIG_DESC_SET(SCU90, 7)
+
+#define A18 56
+SIG_EXPR_LIST_DECL_SINGLE(DASHA18, DASHA18, COND1, SIG_DESC_SET(SCU94, 5));
+SIG_EXPR_LIST_DECL_SINGLE(NCTS6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(A18, GPIOH0, DASHA18, NCTS6);
+
+#define B18 57
+SIG_EXPR_LIST_DECL_SINGLE(DASHB18, DASHB18, COND1, SIG_DESC_SET(SCU94, 5));
+SIG_EXPR_LIST_DECL_SINGLE(NDCD6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(B18, GPIOH1, DASHB18, NDCD6);
+
+#define D17 58
+SIG_EXPR_LIST_DECL_SINGLE(DASHD17, DASHD17, COND1, SIG_DESC_SET(SCU94, 6));
+SIG_EXPR_LIST_DECL_SINGLE(NDSR6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(D17, GPIOH2, DASHD17, NDSR6);
+
+#define C17 59
+SIG_EXPR_LIST_DECL_SINGLE(DASHC17, DASHC17, COND1, SIG_DESC_SET(SCU94, 6));
+SIG_EXPR_LIST_DECL_SINGLE(NRI6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(C17, GPIOH3, DASHC17, NRI6);
+
+#define A17 60
+SIG_EXPR_LIST_DECL_SINGLE(DASHA17, DASHA17, COND1, SIG_DESC_SET(SCU94, 7));
+SIG_EXPR_LIST_DECL_SINGLE(NDTR6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(A17, GPIOH4, DASHA17, NDTR6);
+
+#define B17 61
+SIG_EXPR_LIST_DECL_SINGLE(DASHB17, DASHB17, COND1, SIG_DESC_SET(SCU94, 7));
+SIG_EXPR_LIST_DECL_SINGLE(NRTS6, UART6, COND1, UART6_DESC);
+MS_PIN_DECL(B17, GPIOH5, DASHB17, NRTS6);
+
+#define A16 62
+SIG_EXPR_LIST_DECL_SINGLE(TXD6, UART6, COND1, UART6_DESC);
+SS_PIN_DECL(A16, GPIOH6, TXD6);
+
+#define D18 63
+SIG_EXPR_LIST_DECL_SINGLE(RXD6, UART6, COND1, UART6_DESC);
+SS_PIN_DECL(D18, GPIOH7, RXD6);
+
+FUNC_GROUP_DECL(UART6, A18, B18, D17, C17, A17, B17, A16, D18);
+
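+/*
+ * HW_STRAP1[13:12] encodings 1, 2 and 3 select the SPI1, SPI1DEBUG and
+ * SPI1PASSTHRU descriptors respectively.
+ */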
+#define SPI1_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC \
+ { ASPEED_IP_SCU, HW_STRAP1, GENMASK(13, 12), 3, 0 }
#define C18 64
SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
@@ -277,6 +577,30 @@ SS_PIN_DECL(N3, GPIOJ2, SGPMO);
SIG_EXPR_LIST_DECL_SINGLE(SGPMI, SGPM, SIG_DESC_SET(SCU84, 11));
SS_PIN_DECL(N4, GPIOJ3, SGPMI);
+#define N5 76
+SIG_EXPR_LIST_DECL_SINGLE(VGAHS, VGAHS, SIG_DESC_SET(SCU84, 12));
+SIG_EXPR_LIST_DECL_SINGLE(DASHN5, DASHN5, SIG_DESC_SET(SCU94, 8));
+MS_PIN_DECL(N5, GPIOJ4, VGAHS, DASHN5);
+FUNC_GROUP_DECL(VGAHS, N5);
+
+#define R4 77
+SIG_EXPR_LIST_DECL_SINGLE(VGAVS, VGAVS, SIG_DESC_SET(SCU84, 13));
+SIG_EXPR_LIST_DECL_SINGLE(DASHR4, DASHR4, SIG_DESC_SET(SCU94, 8));
+MS_PIN_DECL(R4, GPIOJ5, VGAVS, DASHR4);
+FUNC_GROUP_DECL(VGAVS, R4);
+
+#define R3 78
+SIG_EXPR_LIST_DECL_SINGLE(DDCCLK, DDCCLK, SIG_DESC_SET(SCU84, 14));
+SIG_EXPR_LIST_DECL_SINGLE(DASHR3, DASHR3, SIG_DESC_SET(SCU94, 9));
+MS_PIN_DECL(R3, GPIOJ6, DDCCLK, DASHR3);
+FUNC_GROUP_DECL(DDCCLK, R3);
+
+#define T3 79
+SIG_EXPR_LIST_DECL_SINGLE(DDCDAT, DDCDAT, SIG_DESC_SET(SCU84, 15));
+SIG_EXPR_LIST_DECL_SINGLE(DASHT3, DASHT3, SIG_DESC_SET(SCU94, 9));
+MS_PIN_DECL(T3, GPIOJ7, DDCDAT, DASHT3);
+FUNC_GROUP_DECL(DDCDAT, T3);
+
#define I2C5_DESC SIG_DESC_SET(SCU90, 18)
#define L3 80
@@ -325,10 +649,119 @@ SS_PIN_DECL(R1, GPIOK7, SDA8);
FUNC_GROUP_DECL(I2C8, P2, R1);
-#define VPIOFF0_DESC { SCU90, GENMASK(5, 4), 0, 0 }
-#define VPIOFF1_DESC { SCU90, GENMASK(5, 4), 1, 0 }
-#define VPI24_DESC { SCU90, GENMASK(5, 4), 2, 0 }
-#define VPIRSVD_DESC { SCU90, GENMASK(5, 4), 3, 0 }
+#define T2 88
+SSSF_PIN_DECL(T2, GPIOL0, NCTS1, SIG_DESC_SET(SCU84, 16));
+
+#define VPIOFF0_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 0, 0 }
+#define VPIOFF1_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 1, 0 }
+#define VPI24_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 2, 0 }
+#define VPIRSVD_DESC { ASPEED_IP_SCU, SCU90, GENMASK(5, 4), 3, 0 }
+#define VPI_24_RSVD_DESC SIG_DESC_SET(SCU90, 5)
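+/*
+ * Both the VPI24 (2) and reserved (3) encodings of SCU90[5:4] have bit 5
+ * set, so VPI_24_RSVD_DESC covers either by testing SCU90[5] alone.
+ */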
+
+#define T1 89
+#define T1_DESC SIG_DESC_SET(SCU84, 17)
+SIG_EXPR_LIST_DECL_SINGLE(VPIDE, VPI24, VPI_24_RSVD_DESC, T1_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD1, NDCD1, T1_DESC, COND2);
+MS_PIN_DECL(T1, GPIOL1, VPIDE, NDCD1);
+FUNC_GROUP_DECL(NDCD1, T1);
+
+#define U1 90
+#define U1_DESC SIG_DESC_SET(SCU84, 18)
+SIG_EXPR_LIST_DECL_SINGLE(DASHU1, VPI24, VPI_24_RSVD_DESC, U1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR1, NDSR1, U1_DESC);
+MS_PIN_DECL(U1, GPIOL2, DASHU1, NDSR1);
+FUNC_GROUP_DECL(NDSR1, U1);
+
+#define U2 91
+#define U2_DESC SIG_DESC_SET(SCU84, 19)
+SIG_EXPR_LIST_DECL_SINGLE(VPIHS, VPI24, VPI_24_RSVD_DESC, U2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRI1, NRI1, U2_DESC, COND2);
+MS_PIN_DECL(U2, GPIOL3, VPIHS, NRI1);
+FUNC_GROUP_DECL(NRI1, U2);
+
+#define P4 92
+#define P4_DESC SIG_DESC_SET(SCU84, 20)
+SIG_EXPR_LIST_DECL_SINGLE(VPIVS, VPI24, VPI_24_RSVD_DESC, P4_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR1, NDTR1, P4_DESC, COND2);
+MS_PIN_DECL(P4, GPIOL4, VPIVS, NDTR1);
+FUNC_GROUP_DECL(NDTR1, P4);
+
+#define P3 93
+#define P3_DESC SIG_DESC_SET(SCU84, 21)
+SIG_EXPR_LIST_DECL_SINGLE(VPICLK, VPI24, VPI_24_RSVD_DESC, P3_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS1, NRTS1, P3_DESC, COND2);
+MS_PIN_DECL(P3, GPIOL5, VPICLK, NRTS1);
+FUNC_GROUP_DECL(NRTS1, P3);
+
+#define V1 94
+#define V1_DESC SIG_DESC_SET(SCU84, 22)
+SIG_EXPR_LIST_DECL_SINGLE(DASHV1, DASHV1, VPIRSVD_DESC, V1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(TXD1, TXD1, V1_DESC, COND2);
+MS_PIN_DECL(V1, GPIOL6, DASHV1, TXD1);
+FUNC_GROUP_DECL(TXD1, V1);
+
+#define W1 95
+#define W1_DESC SIG_DESC_SET(SCU84, 23)
+SIG_EXPR_LIST_DECL_SINGLE(DASHW1, DASHW1, VPIRSVD_DESC, W1_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(RXD1, RXD1, W1_DESC, COND2);
+MS_PIN_DECL(W1, GPIOL7, DASHW1, RXD1);
+FUNC_GROUP_DECL(RXD1, W1);
+
+#define Y1 96
+#define Y1_DESC SIG_DESC_SET(SCU84, 24)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB2, VPI24, VPI_24_RSVD_DESC, Y1_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NCTS2, NCTS2, Y1_DESC, COND2);
+MS_PIN_DECL(Y1, GPIOM0, VPIB2, NCTS2);
+FUNC_GROUP_DECL(NCTS2, Y1);
+
+#define AB2 97
+#define AB2_DESC SIG_DESC_SET(SCU84, 25)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB3, VPI24, VPI_24_RSVD_DESC, AB2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDCD2, NDCD2, AB2_DESC, COND2);
+MS_PIN_DECL(AB2, GPIOM1, VPIB3, NDCD2);
+FUNC_GROUP_DECL(NDCD2, AB2);
+
+#define AA1 98
+#define AA1_DESC SIG_DESC_SET(SCU84, 26)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB4, VPI24, VPI_24_RSVD_DESC, AA1_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDSR2, NDSR2, AA1_DESC, COND2);
+MS_PIN_DECL(AA1, GPIOM2, VPIB4, NDSR2);
+FUNC_GROUP_DECL(NDSR2, AA1);
+
+#define Y2 99
+#define Y2_DESC SIG_DESC_SET(SCU84, 27)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB5, VPI24, VPI_24_RSVD_DESC, Y2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRI2, NRI2, Y2_DESC, COND2);
+MS_PIN_DECL(Y2, GPIOM3, VPIB5, NRI2);
+FUNC_GROUP_DECL(NRI2, Y2);
+
+#define AA2 100
+#define AA2_DESC SIG_DESC_SET(SCU84, 28)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB6, VPI24, VPI_24_RSVD_DESC, AA2_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NDTR2, NDTR2, AA2_DESC, COND2);
+MS_PIN_DECL(AA2, GPIOM4, VPIB6, NDTR2);
+FUNC_GROUP_DECL(NDTR2, AA2);
+
+#define P5 101
+#define P5_DESC SIG_DESC_SET(SCU84, 29)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB7, VPI24, VPI_24_RSVD_DESC, P5_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(NRTS2, NRTS2, P5_DESC, COND2);
+MS_PIN_DECL(P5, GPIOM5, VPIB7, NRTS2);
+FUNC_GROUP_DECL(NRTS2, P5);
+
+#define R5 102
+#define R5_DESC SIG_DESC_SET(SCU84, 30)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB8, VPI24, VPI_24_RSVD_DESC, R5_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(TXD2, TXD2, R5_DESC, COND2);
+MS_PIN_DECL(R5, GPIOM6, VPIB8, TXD2);
+FUNC_GROUP_DECL(TXD2, R5);
+
+#define T5 103
+#define T5_DESC SIG_DESC_SET(SCU84, 31)
+SIG_EXPR_LIST_DECL_SINGLE(VPIB9, VPI24, VPI_24_RSVD_DESC, T5_DESC, COND2);
+SIG_EXPR_LIST_DECL_SINGLE(RXD2, RXD2, T5_DESC, COND2);
+MS_PIN_DECL(T5, GPIOM7, VPIB9, RXD2);
+FUNC_GROUP_DECL(RXD2, T5);
#define V2 104
#define V2_DESC SIG_DESC_SET(SCU88, 0)
@@ -394,9 +827,88 @@ SIG_EXPR_LIST_DECL_SINGLE(PWM7, PWM7, T4_DESC, COND2);
MS_PIN_DECL(T4, GPION7, VPIG7, PWM7);
FUNC_GROUP_DECL(PWM7, T4);
+#define U5 112
+SIG_EXPR_LIST_DECL_SINGLE(VPIG8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 8),
+ COND2);
+SS_PIN_DECL(U5, GPIOO0, VPIG8);
+
+#define U4 113
+SIG_EXPR_LIST_DECL_SINGLE(VPIG9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 9),
+ COND2);
+SS_PIN_DECL(U4, GPIOO1, VPIG9);
+
+#define V5 114
+SIG_EXPR_LIST_DECL_SINGLE(DASHV5, DASHV5, VPI_24_RSVD_DESC,
+ SIG_DESC_SET(SCU88, 10));
+SS_PIN_DECL(V5, GPIOO2, DASHV5);
+
+#define AB4 115
+SIG_EXPR_LIST_DECL_SINGLE(DASHAB4, DASHAB4, VPI_24_RSVD_DESC,
+ SIG_DESC_SET(SCU88, 11));
+SS_PIN_DECL(AB4, GPIOO3, DASHAB4);
+
+#define AB3 116
+SIG_EXPR_LIST_DECL_SINGLE(VPIR2, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 12),
+ COND2);
+SS_PIN_DECL(AB3, GPIOO4, VPIR2);
+
+#define Y4 117
+SIG_EXPR_LIST_DECL_SINGLE(VPIR3, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 13),
+ COND2);
+SS_PIN_DECL(Y4, GPIOO5, VPIR3);
+
+#define AA4 118
+SIG_EXPR_LIST_DECL_SINGLE(VPIR4, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 14),
+ COND2);
+SS_PIN_DECL(AA4, GPIOO6, VPIR4);
+
+#define W4 119
+SIG_EXPR_LIST_DECL_SINGLE(VPIR5, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 15),
+ COND2);
+SS_PIN_DECL(W4, GPIOO7, VPIR5);
+
+#define V4 120
+SIG_EXPR_LIST_DECL_SINGLE(VPIR6, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 16),
+ COND2);
+SS_PIN_DECL(V4, GPIOP0, VPIR6);
+
+#define W5 121
+SIG_EXPR_LIST_DECL_SINGLE(VPIR7, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 17),
+ COND2);
+SS_PIN_DECL(W5, GPIOP1, VPIR7);
+
+#define AA5 122
+SIG_EXPR_LIST_DECL_SINGLE(VPIR8, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 18),
+ COND2);
+SS_PIN_DECL(AA5, GPIOP2, VPIR8);
+
+#define AB5 123
+SIG_EXPR_LIST_DECL_SINGLE(VPIR9, VPI24, VPI24_DESC, SIG_DESC_SET(SCU88, 19),
+ COND2);
+SS_PIN_DECL(AB5, GPIOP3, VPIR9);
+
+FUNC_GROUP_DECL(VPI24, T1, U2, P4, P3, Y1, AB2, AA1, Y2, AA2, P5, R5, T5, V3,
+ U3, W3, AA3, Y3, T4, U5, U4, AB3, Y4, AA4, W4, V4, W5, AA5,
+ AB5);
+
+#define Y6 124
+SIG_EXPR_LIST_DECL_SINGLE(DASHY6, DASHY6, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 20));
+SS_PIN_DECL(Y6, GPIOP4, DASHY6);
+
+#define Y5 125
+SIG_EXPR_LIST_DECL_SINGLE(DASHY5, DASHY5, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 21));
+SS_PIN_DECL(Y5, GPIOP5, DASHY5);
+
+#define W6 126
+SIG_EXPR_LIST_DECL_SINGLE(DASHW6, DASHW6, SIG_DESC_SET(SCU90, 28),
+ SIG_DESC_SET(SCU88, 22));
+SS_PIN_DECL(W6, GPIOP6, DASHW6);
+
#define V6 127
SIG_EXPR_LIST_DECL_SINGLE(DASHV6, DASHV6, SIG_DESC_SET(SCU90, 28),
- SIG_DESC_SET(SCU88, 23));
+ SIG_DESC_SET(SCU88, 23));
SS_PIN_DECL(V6, GPIOP7, DASHV6);
#define I2C3_DESC SIG_DESC_SET(SCU90, 16)
@@ -441,6 +953,24 @@ SSSF_PIN_DECL(B10, GPIOQ6, OSCCLK, SIG_DESC_SET(SCU2C, 1));
#define N20 135
SSSF_PIN_DECL(N20, GPIOQ7, PEWAKE, SIG_DESC_SET(SCU2C, 29));
+#define AA19 136
+SSSF_PIN_DECL(AA19, GPIOR0, FWSPICS1, SIG_DESC_SET(SCU88, 24), COND2);
+
+#define T19 137
+SSSF_PIN_DECL(T19, GPIOR1, FWSPICS2, SIG_DESC_SET(SCU88, 25), COND2);
+
+#define T17 138
+SSSF_PIN_DECL(T17, GPIOR2, SPI2CS0, SIG_DESC_SET(SCU88, 26), COND2);
+
+#define Y19 139
+SSSF_PIN_DECL(Y19, GPIOR3, SPI2CK, SIG_DESC_SET(SCU88, 27), COND2);
+
+#define W19 140
+SSSF_PIN_DECL(W19, GPIOR4, SPI2MOSI, SIG_DESC_SET(SCU88, 28), COND2);
+
+#define V19 141
+SSSF_PIN_DECL(V19, GPIOR5, SPI2MISO, SIG_DESC_SET(SCU88, 29), COND2);
+
#define D8 142
SIG_EXPR_LIST_DECL_SINGLE(MDC1, MDIO1, SIG_DESC_SET(SCU88, 30));
SS_PIN_DECL(D8, GPIOR6, MDC1);
@@ -451,6 +981,93 @@ SS_PIN_DECL(E10, GPIOR7, MDIO1);
FUNC_GROUP_DECL(MDIO1, D8, E10);
+#define VPOOFF0_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 0, 0 }
+#define VPO_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 1, 0 }
+#define VPOOFF1_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 2, 0 }
+#define VPOOFF2_DESC { ASPEED_IP_SCU, SCU94, GENMASK(1, 0), 3, 0 }
+
+#define CRT_DVO_EN_DESC SIG_DESC_IP_SET(ASPEED_IP_GFX, GFX064, 7)
+
+#define V20 144
+#define V20_DESC SIG_DESC_SET(SCU8C, 0)
+SIG_EXPR_DECL(VPOB2, VPO, V20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB2, VPOOFF1, V20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB2, VPOOFF2, V20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB2, SIG_EXPR_PTR(VPOB2, VPO),
+ SIG_EXPR_PTR(VPOB2, VPOOFF1), SIG_EXPR_PTR(VPOB2, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SPI2CS1, SPI2CS1, V20_DESC);
+MS_PIN_DECL(V20, GPIOS0, VPOB2, SPI2CS1);
+FUNC_GROUP_DECL(SPI2CS1, V20);
+
+#define U19 145
+#define U19_DESC SIG_DESC_SET(SCU8C, 1)
+SIG_EXPR_DECL(VPOB3, VPO, U19_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB3, VPOOFF1, U19_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB3, VPOOFF2, U19_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB3, SIG_EXPR_PTR(VPOB3, VPO),
+ SIG_EXPR_PTR(VPOB3, VPOOFF1), SIG_EXPR_PTR(VPOB3, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(BMCINT, BMCINT, U19_DESC);
+MS_PIN_DECL(U19, GPIOS1, VPOB3, BMCINT);
+FUNC_GROUP_DECL(BMCINT, U19);
+
+#define R18 146
+#define R18_DESC SIG_DESC_SET(SCU8C, 2)
+SIG_EXPR_DECL(VPOB4, VPO, R18_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB4, VPOOFF1, R18_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB4, VPOOFF2, R18_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB4, SIG_EXPR_PTR(VPOB4, VPO),
+ SIG_EXPR_PTR(VPOB4, VPOOFF1), SIG_EXPR_PTR(VPOB4, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT5, SALT5, R18_DESC);
+MS_PIN_DECL(R18, GPIOS2, VPOB4, SALT5);
+FUNC_GROUP_DECL(SALT5, R18);
+
+#define P18 147
+#define P18_DESC SIG_DESC_SET(SCU8C, 3)
+SIG_EXPR_DECL(VPOB5, VPO, P18_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB5, VPOOFF1, P18_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB5, VPOOFF2, P18_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB5, SIG_EXPR_PTR(VPOB5, VPO),
+ SIG_EXPR_PTR(VPOB5, VPOOFF1), SIG_EXPR_PTR(VPOB5, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT6, SALT6, P18_DESC);
+MS_PIN_DECL(P18, GPIOS3, VPOB5, SALT6);
+FUNC_GROUP_DECL(SALT6, P18);
+
+#define R19 148
+#define R19_DESC SIG_DESC_SET(SCU8C, 4)
+SIG_EXPR_DECL(VPOB6, VPO, R19_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB6, VPOOFF1, R19_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB6, VPOOFF2, R19_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB6, SIG_EXPR_PTR(VPOB6, VPO),
+ SIG_EXPR_PTR(VPOB6, VPOOFF1), SIG_EXPR_PTR(VPOB6, VPOOFF2));
+SS_PIN_DECL(R19, GPIOS4, VPOB6);
+
+#define W20 149
+#define W20_DESC SIG_DESC_SET(SCU8C, 5)
+SIG_EXPR_DECL(VPOB7, VPO, W20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB7, VPOOFF1, W20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB7, VPOOFF2, W20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB7, SIG_EXPR_PTR(VPOB7, VPO),
+ SIG_EXPR_PTR(VPOB7, VPOOFF1), SIG_EXPR_PTR(VPOB7, VPOOFF2));
+SS_PIN_DECL(W20, GPIOS5, VPOB7);
+
+#define U20 150
+#define U20_DESC SIG_DESC_SET(SCU8C, 6)
+SIG_EXPR_DECL(VPOB8, VPO, U20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB8, VPOOFF1, U20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB8, VPOOFF2, U20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB8, SIG_EXPR_PTR(VPOB8, VPO),
+ SIG_EXPR_PTR(VPOB8, VPOOFF1), SIG_EXPR_PTR(VPOB8, VPOOFF2));
+SS_PIN_DECL(U20, GPIOS6, VPOB8);
+
+#define AA20 151
+#define AA20_DESC SIG_DESC_SET(SCU8C, 7)
+SIG_EXPR_DECL(VPOB9, VPO, AA20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB9, VPOOFF1, AA20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOB9, VPOOFF2, AA20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOB9, SIG_EXPR_PTR(VPOB9, VPO),
+ SIG_EXPR_PTR(VPOB9, VPOOFF1), SIG_EXPR_PTR(VPOB9, VPOOFF2));
+SS_PIN_DECL(AA20, GPIOS7, VPOB9);
+
/* RGMII1/RMII1 */
#define RMII1_DESC SIG_DESC_BIT(HW_STRAP1, 6, 0)
@@ -632,6 +1249,481 @@ MS_PIN_DECL_(E6, SIG_EXPR_LIST_PTR(GPIOV7), SIG_EXPR_LIST_PTR(RMII2RXER),
FUNC_GROUP_DECL(RGMII2, B2, B1, A2, B3, D5, D4, C2, C1, C3, D1, D2, E6);
FUNC_GROUP_DECL(RMII2, B2, B1, A2, B3, C2, C3, D1, D2, E6);
+#define F4 176
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW0, GPIOW0, SIG_DESC_SET(SCUA0, 24));
+SIG_EXPR_LIST_DECL_SINGLE(ADC0, ADC0);
+MS_PIN_DECL_(F4, SIG_EXPR_LIST_PTR(GPIOW0), SIG_EXPR_LIST_PTR(ADC0));
+FUNC_GROUP_DECL(ADC0, F4);
+
+#define F5 177
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW1, GPIOW1, SIG_DESC_SET(SCUA0, 25));
+SIG_EXPR_LIST_DECL_SINGLE(ADC1, ADC1);
+MS_PIN_DECL_(F5, SIG_EXPR_LIST_PTR(GPIOW1), SIG_EXPR_LIST_PTR(ADC1));
+FUNC_GROUP_DECL(ADC1, F5);
+
+#define E2 178
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW2, GPIOW2, SIG_DESC_SET(SCUA0, 26));
+SIG_EXPR_LIST_DECL_SINGLE(ADC2, ADC2);
+MS_PIN_DECL_(E2, SIG_EXPR_LIST_PTR(GPIOW2), SIG_EXPR_LIST_PTR(ADC2));
+FUNC_GROUP_DECL(ADC2, E2);
+
+#define E1 179
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW3, GPIOW3, SIG_DESC_SET(SCUA0, 27));
+SIG_EXPR_LIST_DECL_SINGLE(ADC3, ADC3);
+MS_PIN_DECL_(E1, SIG_EXPR_LIST_PTR(GPIOW3), SIG_EXPR_LIST_PTR(ADC3));
+FUNC_GROUP_DECL(ADC3, E1);
+
+#define F3 180
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW4, GPIOW4, SIG_DESC_SET(SCUA0, 28));
+SIG_EXPR_LIST_DECL_SINGLE(ADC4, ADC4);
+MS_PIN_DECL_(F3, SIG_EXPR_LIST_PTR(GPIOW4), SIG_EXPR_LIST_PTR(ADC4));
+FUNC_GROUP_DECL(ADC4, F3);
+
+#define E3 181
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW5, GPIOW5, SIG_DESC_SET(SCUA0, 29));
+SIG_EXPR_LIST_DECL_SINGLE(ADC5, ADC5);
+MS_PIN_DECL_(E3, SIG_EXPR_LIST_PTR(GPIOW5), SIG_EXPR_LIST_PTR(ADC5));
+FUNC_GROUP_DECL(ADC5, E3);
+
+#define G5 182
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW6, GPIOW6, SIG_DESC_SET(SCUA0, 30));
+SIG_EXPR_LIST_DECL_SINGLE(ADC6, ADC6);
+MS_PIN_DECL_(G5, SIG_EXPR_LIST_PTR(GPIOW6), SIG_EXPR_LIST_PTR(ADC6));
+FUNC_GROUP_DECL(ADC6, G5);
+
+#define G4 183
+SIG_EXPR_LIST_DECL_SINGLE(GPIOW7, GPIOW7, SIG_DESC_SET(SCUA0, 31));
+SIG_EXPR_LIST_DECL_SINGLE(ADC7, ADC7);
+MS_PIN_DECL_(G4, SIG_EXPR_LIST_PTR(GPIOW7), SIG_EXPR_LIST_PTR(ADC7));
+FUNC_GROUP_DECL(ADC7, G4);
+
+#define F2 184
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX0, GPIOX0, SIG_DESC_SET(SCUA4, 0));
+SIG_EXPR_LIST_DECL_SINGLE(ADC8, ADC8);
+MS_PIN_DECL_(F2, SIG_EXPR_LIST_PTR(GPIOX0), SIG_EXPR_LIST_PTR(ADC8));
+FUNC_GROUP_DECL(ADC8, F2);
+
+#define G3 185
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX1, GPIOX1, SIG_DESC_SET(SCUA4, 1));
+SIG_EXPR_LIST_DECL_SINGLE(ADC9, ADC9);
+MS_PIN_DECL_(G3, SIG_EXPR_LIST_PTR(GPIOX1), SIG_EXPR_LIST_PTR(ADC9));
+FUNC_GROUP_DECL(ADC9, G3);
+
+#define G2 186
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX2, GPIOX2, SIG_DESC_SET(SCUA4, 2));
+SIG_EXPR_LIST_DECL_SINGLE(ADC10, ADC10);
+MS_PIN_DECL_(G2, SIG_EXPR_LIST_PTR(GPIOX2), SIG_EXPR_LIST_PTR(ADC10));
+FUNC_GROUP_DECL(ADC10, G2);
+
+#define F1 187
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX3, GPIOX3, SIG_DESC_SET(SCUA4, 3));
+SIG_EXPR_LIST_DECL_SINGLE(ADC11, ADC11);
+MS_PIN_DECL_(F1, SIG_EXPR_LIST_PTR(GPIOX3), SIG_EXPR_LIST_PTR(ADC11));
+FUNC_GROUP_DECL(ADC11, F1);
+
+#define H5 188
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX4, GPIOX4, SIG_DESC_SET(SCUA4, 4));
+SIG_EXPR_LIST_DECL_SINGLE(ADC12, ADC12);
+MS_PIN_DECL_(H5, SIG_EXPR_LIST_PTR(GPIOX4), SIG_EXPR_LIST_PTR(ADC12));
+FUNC_GROUP_DECL(ADC12, H5);
+
+#define G1 189
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX5, GPIOX5, SIG_DESC_SET(SCUA4, 5));
+SIG_EXPR_LIST_DECL_SINGLE(ADC13, ADC13);
+MS_PIN_DECL_(G1, SIG_EXPR_LIST_PTR(GPIOX5), SIG_EXPR_LIST_PTR(ADC13));
+FUNC_GROUP_DECL(ADC13, G1);
+
+#define H3 190
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX6, GPIOX6, SIG_DESC_SET(SCUA4, 6));
+SIG_EXPR_LIST_DECL_SINGLE(ADC14, ADC14);
+MS_PIN_DECL_(H3, SIG_EXPR_LIST_PTR(GPIOX6), SIG_EXPR_LIST_PTR(ADC14));
+FUNC_GROUP_DECL(ADC14, H3);
+
+#define H4 191
+SIG_EXPR_LIST_DECL_SINGLE(GPIOX7, GPIOX7, SIG_DESC_SET(SCUA4, 7));
+SIG_EXPR_LIST_DECL_SINGLE(ADC15, ADC15);
+MS_PIN_DECL_(H4, SIG_EXPR_LIST_PTR(GPIOX7), SIG_EXPR_LIST_PTR(ADC15));
+FUNC_GROUP_DECL(ADC15, H4);
+
+#define ACPI_DESC SIG_DESC_SET(HW_STRAP1, 19)
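+/*
+ * HW_STRAP1[19] selects the ACPI alternative for the SIOS3, SIOS5, SIOPWREQ
+ * and SIOONCTRL expressions below; the ACPI function group additionally
+ * spans the SIOPBI/SIOPWRGD/SIOPBO/SIOSCI pins (see FUNC_GROUP_DECL(ACPI)).
+ */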
+
+#define R22 192
+SIG_EXPR_DECL(SIOS3, SIOS3, SIG_DESC_SET(SCUA4, 8));
+SIG_EXPR_DECL(SIOS3, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS3, SIOS3, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHR22, DASHR22, SIG_DESC_SET(SCU94, 10));
+MS_PIN_DECL(R22, GPIOY0, SIOS3, DASHR22);
+FUNC_GROUP_DECL(SIOS3, R22);
+
+#define R21 193
+SIG_EXPR_DECL(SIOS5, SIOS5, SIG_DESC_SET(SCUA4, 9));
+SIG_EXPR_DECL(SIOS5, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOS5, SIOS5, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHR21, DASHR21, SIG_DESC_SET(SCU94, 10));
+MS_PIN_DECL(R21, GPIOY1, SIOS5, DASHR21);
+FUNC_GROUP_DECL(SIOS5, R21);
+
+#define P22 194
+SIG_EXPR_DECL(SIOPWREQ, SIOPWREQ, SIG_DESC_SET(SCUA4, 10));
+SIG_EXPR_DECL(SIOPWREQ, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWREQ, SIOPWREQ, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHP22, DASHP22, SIG_DESC_SET(SCU94, 11));
+MS_PIN_DECL(P22, GPIOY2, SIOPWREQ, DASHP22);
+FUNC_GROUP_DECL(SIOPWREQ, P22);
+
+#define P21 195
+SIG_EXPR_DECL(SIOONCTRL, SIOONCTRL, SIG_DESC_SET(SCUA4, 11));
+SIG_EXPR_DECL(SIOONCTRL, ACPI, ACPI_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOONCTRL, SIOONCTRL, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(DASHP21, DASHP21, SIG_DESC_SET(SCU94, 11));
+MS_PIN_DECL(P21, GPIOY3, SIOONCTRL, DASHP21);
+FUNC_GROUP_DECL(SIOONCTRL, P21);
+
+#define M18 196
+SSSF_PIN_DECL(M18, GPIOY4, SCL1, SIG_DESC_SET(SCUA4, 12));
+
+#define M19 197
+SSSF_PIN_DECL(M19, GPIOY5, SDA1, SIG_DESC_SET(SCUA4, 13));
+
+#define M20 198
+SSSF_PIN_DECL(M20, GPIOY6, SCL2, SIG_DESC_SET(SCUA4, 14));
+
+#define P20 199
+SSSF_PIN_DECL(P20, GPIOY7, SDA2, SIG_DESC_SET(SCUA4, 15));
+
+#define PNOR_DESC SIG_DESC_SET(SCU90, 31)
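+/*
+ * SCU90[31] routes the parallel NOR signals (NORA*, NORD*, NOROE, NORWE)
+ * onto the pins collected in FUNC_GROUP_DECL(PNOR) below.
+ */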
+
+#define Y20 200
+#define Y20_DESC SIG_DESC_SET(SCUA4, 16)
+SIG_EXPR_DECL(VPOG2, VPO, Y20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG2, VPOOFF1, Y20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG2, VPOOFF2, Y20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG2, SIG_EXPR_PTR(VPOG2, VPO),
+ SIG_EXPR_PTR(VPOG2, VPOOFF1), SIG_EXPR_PTR(VPOG2, VPOOFF2));
+SIG_EXPR_DECL(SIOPBI, SIOPBI, Y20_DESC);
+SIG_EXPR_DECL(SIOPBI, ACPI, Y20_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPBI, SIOPBI, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA0, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ0, GPIOZ0);
+MS_PIN_DECL_(Y20, SIG_EXPR_LIST_PTR(VPOG2), SIG_EXPR_LIST_PTR(SIOPBI),
+ SIG_EXPR_LIST_PTR(NORA0), SIG_EXPR_LIST_PTR(GPIOZ0));
+FUNC_GROUP_DECL(SIOPBI, Y20);
+
+#define AB20 201
+#define AB20_DESC SIG_DESC_SET(SCUA4, 17)
+SIG_EXPR_DECL(VPOG3, VPO, AB20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG3, VPOOFF1, AB20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG3, VPOOFF2, AB20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG3, SIG_EXPR_PTR(VPOG3, VPO),
+ SIG_EXPR_PTR(VPOG3, VPOOFF1), SIG_EXPR_PTR(VPOG3, VPOOFF2));
+SIG_EXPR_DECL(SIOPWRGD, SIOPWRGD, AB20_DESC);
+SIG_EXPR_DECL(SIOPWRGD, ACPI, AB20_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPWRGD, SIOPWRGD, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA1, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ1, GPIOZ1);
+MS_PIN_DECL_(AB20, SIG_EXPR_LIST_PTR(VPOG3), SIG_EXPR_LIST_PTR(SIOPWRGD),
+ SIG_EXPR_LIST_PTR(NORA1), SIG_EXPR_LIST_PTR(GPIOZ1));
+FUNC_GROUP_DECL(SIOPWRGD, AB20);
+
+#define AB21 202
+#define AB21_DESC SIG_DESC_SET(SCUA4, 18)
+SIG_EXPR_DECL(VPOG4, VPO, AB21_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG4, VPOOFF1, AB21_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG4, VPOOFF2, AB21_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG4, SIG_EXPR_PTR(VPOG4, VPO),
+ SIG_EXPR_PTR(VPOG4, VPOOFF1), SIG_EXPR_PTR(VPOG4, VPOOFF2));
+SIG_EXPR_DECL(SIOPBO, SIOPBO, AB21_DESC);
+SIG_EXPR_DECL(SIOPBO, ACPI, AB21_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOPBO, SIOPBO, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA2, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ2, GPIOZ2);
+MS_PIN_DECL_(AB21, SIG_EXPR_LIST_PTR(VPOG4), SIG_EXPR_LIST_PTR(SIOPBO),
+ SIG_EXPR_LIST_PTR(NORA2), SIG_EXPR_LIST_PTR(GPIOZ2));
+FUNC_GROUP_DECL(SIOPBO, AB21);
+
+#define AA21 203
+#define AA21_DESC SIG_DESC_SET(SCUA4, 19)
+SIG_EXPR_DECL(VPOG5, VPO, AA21_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG5, VPOOFF1, AA21_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOG5, VPOOFF2, AA21_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOG5, SIG_EXPR_PTR(VPOG5, VPO),
+ SIG_EXPR_PTR(VPOG5, VPOOFF1), SIG_EXPR_PTR(VPOG5, VPOOFF2));
+SIG_EXPR_DECL(SIOSCI, SIOSCI, AA21_DESC);
+SIG_EXPR_DECL(SIOSCI, ACPI, AA21_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SIOSCI, SIOSCI, ACPI);
+SIG_EXPR_LIST_DECL_SINGLE(NORA3, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOZ3, GPIOZ3);
+MS_PIN_DECL_(AA21, SIG_EXPR_LIST_PTR(VPOG5), SIG_EXPR_LIST_PTR(SIOSCI),
+ SIG_EXPR_LIST_PTR(NORA3), SIG_EXPR_LIST_PTR(GPIOZ3));
+FUNC_GROUP_DECL(SIOSCI, AA21);
+
+FUNC_GROUP_DECL(ACPI, R22, R21, P22, P21, Y20, AB20, AB21, AA21);
+
+/* CRT DVO disabled, configured for single-edge mode */
+#define CRT_DVO_DS_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 0, 0 }
+
+/* CRT DVO disabled, configured for dual-edge mode */
+#define CRT_DVO_DD_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 1, 1 }
+
+/* CRT DVO enabled, configured for single-edge mode */
+#define CRT_DVO_ES_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 2, 2 }
+
+/* CRT DVO enabled, configured for dual-edge mode */
+#define CRT_DVO_ED_DESC { ASPEED_IP_GFX, GFX064, GENMASK(7, 6), 3, 3 }
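+/*
+ * The VPOG6-VPOG9 and VPOR2-VPOR9 expressions below use the single-edge
+ * descriptor (CRT_DVO_ES_DESC), while the other VPO expressions use the
+ * enable-only descriptor (CRT_DVO_EN_DESC above).
+ */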
+
+#define U21 204
+#define U21_DESC SIG_DESC_SET(SCUA4, 20)
+SIG_EXPR_DECL(VPOG6, VPO, U21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG6, VPOOFF1, U21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG6, VPOOFF2, U21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG6, SIG_EXPR_PTR(VPOG6, VPO),
+ SIG_EXPR_PTR(VPOG6, VPOOFF1), SIG_EXPR_PTR(VPOG6, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA4, PNOR, PNOR_DESC);
+MS_PIN_DECL(U21, GPIOZ4, VPOG6, NORA4);
+
+#define W22 205
+#define W22_DESC SIG_DESC_SET(SCUA4, 21)
+SIG_EXPR_DECL(VPOG7, VPO, W22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG7, VPOOFF1, W22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG7, VPOOFF2, W22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG7, SIG_EXPR_PTR(VPOG7, VPO),
+ SIG_EXPR_PTR(VPOG7, VPOOFF1), SIG_EXPR_PTR(VPOG7, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA5, PNOR, PNOR_DESC);
+MS_PIN_DECL(W22, GPIOZ5, VPOG7, NORA5);
+
+#define V22 206
+#define V22_DESC SIG_DESC_SET(SCUA4, 22)
+SIG_EXPR_DECL(VPOG8, VPO, V22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG8, VPOOFF1, V22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG8, VPOOFF2, V22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG8, SIG_EXPR_PTR(VPOG8, VPO),
+ SIG_EXPR_PTR(VPOG8, VPOOFF1), SIG_EXPR_PTR(VPOG8, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA6, PNOR, PNOR_DESC);
+MS_PIN_DECL(V22, GPIOZ6, VPOG8, NORA6);
+
+#define W21 207
+#define W21_DESC SIG_DESC_SET(SCUA4, 23)
+SIG_EXPR_DECL(VPOG9, VPO, W21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG9, VPOOFF1, W21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOG9, VPOOFF2, W21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOG9, SIG_EXPR_PTR(VPOG9, VPO),
+ SIG_EXPR_PTR(VPOG9, VPOOFF1), SIG_EXPR_PTR(VPOG9, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORA7, PNOR, PNOR_DESC);
+MS_PIN_DECL(W21, GPIOZ7, VPOG9, NORA7);
+
+#define Y21 208
+#define Y21_DESC SIG_DESC_SET(SCUA4, 24)
+SIG_EXPR_DECL(VPOR2, VPO, Y21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR2, VPOOFF1, Y21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR2, VPOOFF2, Y21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR2, SIG_EXPR_PTR(VPOR2, VPO),
+ SIG_EXPR_PTR(VPOR2, VPOOFF1), SIG_EXPR_PTR(VPOR2, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT7, SALT7, Y21_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD0, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA0, GPIOAA0);
+MS_PIN_DECL_(Y21, SIG_EXPR_LIST_PTR(VPOR2), SIG_EXPR_LIST_PTR(SALT7),
+ SIG_EXPR_LIST_PTR(NORD0), SIG_EXPR_LIST_PTR(GPIOAA0));
+FUNC_GROUP_DECL(SALT7, Y21);
+
+#define V21 209
+#define V21_DESC SIG_DESC_SET(SCUA4, 25)
+SIG_EXPR_DECL(VPOR3, VPO, V21_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR3, VPOOFF1, V21_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR3, VPOOFF2, V21_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR3, SIG_EXPR_PTR(VPOR3, VPO),
+ SIG_EXPR_PTR(VPOR3, VPOOFF1), SIG_EXPR_PTR(VPOR3, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT8, SALT8, V21_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD1, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA1, GPIOAA1);
+MS_PIN_DECL_(V21, SIG_EXPR_LIST_PTR(VPOR3), SIG_EXPR_LIST_PTR(SALT8),
+ SIG_EXPR_LIST_PTR(NORD1), SIG_EXPR_LIST_PTR(GPIOAA1));
+FUNC_GROUP_DECL(SALT8, V21);
+
+#define Y22 210
+#define Y22_DESC SIG_DESC_SET(SCUA4, 26)
+SIG_EXPR_DECL(VPOR4, VPO, Y22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR4, VPOOFF1, Y22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR4, VPOOFF2, Y22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR4, SIG_EXPR_PTR(VPOR4, VPO),
+ SIG_EXPR_PTR(VPOR4, VPOOFF1), SIG_EXPR_PTR(VPOR4, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT9, SALT9, Y22_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD2, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA2, GPIOAA2);
+MS_PIN_DECL_(Y22, SIG_EXPR_LIST_PTR(VPOR4), SIG_EXPR_LIST_PTR(SALT9),
+ SIG_EXPR_LIST_PTR(NORD2), SIG_EXPR_LIST_PTR(GPIOAA2));
+FUNC_GROUP_DECL(SALT9, Y22);
+
+#define AA22 211
+#define AA22_DESC SIG_DESC_SET(SCUA4, 27)
+SIG_EXPR_DECL(VPOR5, VPO, AA22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR5, VPOOFF1, AA22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR5, VPOOFF2, AA22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR5, SIG_EXPR_PTR(VPOR5, VPO),
+ SIG_EXPR_PTR(VPOR5, VPOOFF1), SIG_EXPR_PTR(VPOR5, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT10, SALT10, AA22_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD3, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA3, GPIOAA3);
+MS_PIN_DECL_(AA22, SIG_EXPR_LIST_PTR(VPOR5), SIG_EXPR_LIST_PTR(SALT10),
+ SIG_EXPR_LIST_PTR(NORD3), SIG_EXPR_LIST_PTR(GPIOAA3));
+FUNC_GROUP_DECL(SALT10, AA22);
+
+#define U22 212
+#define U22_DESC SIG_DESC_SET(SCUA4, 28)
+SIG_EXPR_DECL(VPOR6, VPO, U22_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR6, VPOOFF1, U22_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR6, VPOOFF2, U22_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR6, SIG_EXPR_PTR(VPOR6, VPO),
+ SIG_EXPR_PTR(VPOR6, VPOOFF1), SIG_EXPR_PTR(VPOR6, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT11, SALT11, U22_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD4, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA4, GPIOAA4);
+MS_PIN_DECL_(U22, SIG_EXPR_LIST_PTR(VPOR6), SIG_EXPR_LIST_PTR(SALT11),
+ SIG_EXPR_LIST_PTR(NORD4), SIG_EXPR_LIST_PTR(GPIOAA4));
+FUNC_GROUP_DECL(SALT11, U22);
+
+#define T20 213
+#define T20_DESC SIG_DESC_SET(SCUA4, 29)
+SIG_EXPR_DECL(VPOR7, VPO, T20_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR7, VPOOFF1, T20_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR7, VPOOFF2, T20_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR7, SIG_EXPR_PTR(VPOR7, VPO),
+ SIG_EXPR_PTR(VPOR7, VPOOFF1), SIG_EXPR_PTR(VPOR7, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT12, SALT12, T20_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD5, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA5, GPIOAA5);
+MS_PIN_DECL_(T20, SIG_EXPR_LIST_PTR(VPOR7), SIG_EXPR_LIST_PTR(SALT12),
+ SIG_EXPR_LIST_PTR(NORD5), SIG_EXPR_LIST_PTR(GPIOAA5));
+FUNC_GROUP_DECL(SALT12, T20);
+
+#define N18 214
+#define N18_DESC SIG_DESC_SET(SCUA4, 30)
+SIG_EXPR_DECL(VPOR8, VPO, N18_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR8, VPOOFF1, N18_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR8, VPOOFF2, N18_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR8, SIG_EXPR_PTR(VPOR8, VPO),
+ SIG_EXPR_PTR(VPOR8, VPOOFF1), SIG_EXPR_PTR(VPOR8, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT13, SALT13, N18_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD6, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA6, GPIOAA6);
+MS_PIN_DECL_(N18, SIG_EXPR_LIST_PTR(VPOR8), SIG_EXPR_LIST_PTR(SALT13),
+ SIG_EXPR_LIST_PTR(NORD6), SIG_EXPR_LIST_PTR(GPIOAA6));
+FUNC_GROUP_DECL(SALT13, N18);
+
+#define P19 215
+#define P19_DESC SIG_DESC_SET(SCUA4, 31)
+SIG_EXPR_DECL(VPOR9, VPO, P19_DESC, VPO_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR9, VPOOFF1, P19_DESC, VPOOFF1_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_DECL(VPOR9, VPOOFF2, P19_DESC, VPOOFF2_DESC, CRT_DVO_ES_DESC);
+SIG_EXPR_LIST_DECL(VPOR9, SIG_EXPR_PTR(VPOR9, VPO),
+ SIG_EXPR_PTR(VPOR9, VPOOFF1), SIG_EXPR_PTR(VPOR9, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(SALT14, SALT14, P19_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(NORD7, PNOR, PNOR_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(GPIOAA7, GPIOAA7);
+MS_PIN_DECL_(P19, SIG_EXPR_LIST_PTR(VPOR9), SIG_EXPR_LIST_PTR(SALT14),
+ SIG_EXPR_LIST_PTR(NORD7), SIG_EXPR_LIST_PTR(GPIOAA7));
+FUNC_GROUP_DECL(SALT14, P19);
+
+#define N19 216
+#define N19_DESC SIG_DESC_SET(SCUA8, 0)
+SIG_EXPR_DECL(VPODE, VPO, N19_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPODE, VPOOFF1, N19_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPODE, VPOOFF2, N19_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPODE, SIG_EXPR_PTR(VPODE, VPO),
+ SIG_EXPR_PTR(VPODE, VPOOFF1), SIG_EXPR_PTR(VPODE, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NOROE, PNOR, PNOR_DESC);
+MS_PIN_DECL(N19, GPIOAB0, VPODE, NOROE);
+
+#define T21 217
+#define T21_DESC SIG_DESC_SET(SCUA8, 1)
+SIG_EXPR_DECL(VPOHS, VPO, T21_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOHS, VPOOFF1, T21_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOHS, VPOOFF2, T21_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOHS, SIG_EXPR_PTR(VPOHS, VPO),
+ SIG_EXPR_PTR(VPOHS, VPOOFF1), SIG_EXPR_PTR(VPOHS, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(NORWE, PNOR, PNOR_DESC);
+MS_PIN_DECL(T21, GPIOAB1, VPOHS, NORWE);
+
+FUNC_GROUP_DECL(PNOR, Y20, AB20, AB21, AA21, U21, W22, V22, W21, Y21, V21, Y22,
+ AA22, U22, T20, N18, P19, N19, T21);
+
+#define T22 218
+#define T22_DESC SIG_DESC_SET(SCUA8, 2)
+SIG_EXPR_DECL(VPOVS, VPO, T22_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOVS, VPOOFF1, T22_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOVS, VPOOFF2, T22_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOVS, SIG_EXPR_PTR(VPOVS, VPO),
+ SIG_EXPR_PTR(VPOVS, VPOOFF1), SIG_EXPR_PTR(VPOVS, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST1, WDTRST1, T22_DESC);
+MS_PIN_DECL(T22, GPIOAB2, VPOVS, WDTRST1);
+FUNC_GROUP_DECL(WDTRST1, T22);
+
+#define R20 219
+#define R20_DESC SIG_DESC_SET(SCUA8, 3)
+SIG_EXPR_DECL(VPOCLK, VPO, R20_DESC, VPO_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOCLK, VPOOFF1, R20_DESC, VPOOFF1_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_DECL(VPOCLK, VPOOFF2, R20_DESC, VPOOFF2_DESC, CRT_DVO_EN_DESC);
+SIG_EXPR_LIST_DECL(VPOCLK, SIG_EXPR_PTR(VPOCLK, VPO),
+ SIG_EXPR_PTR(VPOCLK, VPOOFF1), SIG_EXPR_PTR(VPOCLK, VPOOFF2));
+SIG_EXPR_LIST_DECL_SINGLE(WDTRST2, WDTRST2, R20_DESC);
+MS_PIN_DECL(R20, GPIOAB3, VPOCLK, WDTRST2);
+FUNC_GROUP_DECL(WDTRST2, R20);
+
+FUNC_GROUP_DECL(VPO, V20, U19, R18, P18, R19, W20, U20, AA20, Y20, AB20,
+ AB21, AA21, U21, W22, V22, W21, Y21, V21, Y22, AA22, U22, T20,
+ N18, P19, N19, T21, T22, R20);
+
+#define ESPI_DESC SIG_DESC_SET(HW_STRAP1, 25)
+
+#define G21 224
+SIG_EXPR_LIST_DECL_SINGLE(ESPID0, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD0, LAD0, SIG_DESC_SET(SCUAC, 0));
+MS_PIN_DECL(G21, GPIOAC0, ESPID0, LAD0);
+FUNC_GROUP_DECL(LAD0, G21);
+
+#define G20 225
+SIG_EXPR_LIST_DECL_SINGLE(ESPID1, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD1, LAD1, SIG_DESC_SET(SCUAC, 1));
+MS_PIN_DECL(G20, GPIOAC1, ESPID1, LAD1);
+FUNC_GROUP_DECL(LAD1, G20);
+
+#define D22 226
+SIG_EXPR_LIST_DECL_SINGLE(ESPID2, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD2, LAD2, SIG_DESC_SET(SCUAC, 2));
+MS_PIN_DECL(D22, GPIOAC2, ESPID2, LAD2);
+FUNC_GROUP_DECL(LAD2, D22);
+
+#define E22 227
+SIG_EXPR_LIST_DECL_SINGLE(ESPID3, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LAD3, LAD3, SIG_DESC_SET(SCUAC, 3));
+MS_PIN_DECL(E22, GPIOAC3, ESPID3, LAD3);
+FUNC_GROUP_DECL(LAD3, E22);
+
+#define C22 228
+SIG_EXPR_LIST_DECL_SINGLE(ESPICK, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LCLK, LCLK, SIG_DESC_SET(SCUAC, 4));
+MS_PIN_DECL(C22, GPIOAC4, ESPICK, LCLK);
+FUNC_GROUP_DECL(LCLK, C22);
+
+#define F21 229
+SIG_EXPR_LIST_DECL_SINGLE(ESPICS, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LFRAME, LFRAME, SIG_DESC_SET(SCUAC, 5));
+MS_PIN_DECL(F21, GPIOAC5, ESPICS, LFRAME);
+FUNC_GROUP_DECL(LFRAME, F21);
+
+#define F22 230
+SIG_EXPR_LIST_DECL_SINGLE(ESPIALT, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LSIRQ, LSIRQ, SIG_DESC_SET(SCUAC, 6));
+MS_PIN_DECL(F22, GPIOAC6, ESPIALT, LSIRQ);
+FUNC_GROUP_DECL(LSIRQ, F22);
+
+#define G22 231
+SIG_EXPR_LIST_DECL_SINGLE(ESPIRST, ESPI, ESPI_DESC);
+SIG_EXPR_LIST_DECL_SINGLE(LPCRST, LPCRST, SIG_DESC_SET(SCUAC, 7));
+MS_PIN_DECL(G22, GPIOAC7, ESPIRST, LPCRST);
+FUNC_GROUP_DECL(LPCRST, G22);
+
+FUNC_GROUP_DECL(ESPI, G21, G20, D22, E22, C22, F21, F22, G22);
+
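Editor's note: the block above follows one fixed pattern per ball: a numeric pin define, per-function signal expressions built from SIG_DESC_* descriptors, then an MS_PIN_DECL()/FUNC_GROUP_DECL() pair tying the expressions to the pin and to a selectable function. A minimal sketch of the same pattern for a hypothetical ball, assuming an invented pin number, ball name Z1, GPIO name GPIOAD0, signal names FOO/BAR and register offsets; only the macros and the ASPEED_IP_GFX index come from this patch. It shows how a descriptor owned by a non-SCU IP block would be written with the new IP-aware helper:

#define Z1 232                                          /* hypothetical pin index */
SIG_EXPR_LIST_DECL_SINGLE(FOO, FOO,
                SIG_DESC_IP_SET(ASPEED_IP_GFX, 0x60, 2)); /* bit lives in the GFX block */
SIG_EXPR_LIST_DECL_SINGLE(BAR, BAR, SIG_DESC_SET(SCUAC, 8)); /* plain SCU bit */
MS_PIN_DECL(Z1, GPIOAD0, FOO, BAR);                     /* FOO has priority over BAR */
FUNC_GROUP_DECL(FOO, Z1);
FUNC_GROUP_DECL(BAR, Z1);

Evaluating or muxing FOO then requires the GFX regmap resolved in the probe function below; if that phandle is absent the expression fails with -ENODEV rather than silently misreporting state.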
/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
@@ -641,12 +1733,32 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(A13),
ASPEED_PINCTRL_PIN(A14),
ASPEED_PINCTRL_PIN(A15),
+ ASPEED_PINCTRL_PIN(A16),
+ ASPEED_PINCTRL_PIN(A17),
+ ASPEED_PINCTRL_PIN(A18),
+ ASPEED_PINCTRL_PIN(A19),
ASPEED_PINCTRL_PIN(A2),
+ ASPEED_PINCTRL_PIN(A20),
+ ASPEED_PINCTRL_PIN(A21),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
ASPEED_PINCTRL_PIN(A5),
ASPEED_PINCTRL_PIN(A9),
+ ASPEED_PINCTRL_PIN(AA1),
+ ASPEED_PINCTRL_PIN(AA19),
+ ASPEED_PINCTRL_PIN(AA2),
+ ASPEED_PINCTRL_PIN(AA20),
+ ASPEED_PINCTRL_PIN(AA21),
+ ASPEED_PINCTRL_PIN(AA22),
ASPEED_PINCTRL_PIN(AA3),
+ ASPEED_PINCTRL_PIN(AA4),
+ ASPEED_PINCTRL_PIN(AA5),
+ ASPEED_PINCTRL_PIN(AB2),
+ ASPEED_PINCTRL_PIN(AB20),
+ ASPEED_PINCTRL_PIN(AB21),
+ ASPEED_PINCTRL_PIN(AB3),
+ ASPEED_PINCTRL_PIN(AB4),
+ ASPEED_PINCTRL_PIN(AB5),
ASPEED_PINCTRL_PIN(B1),
ASPEED_PINCTRL_PIN(B10),
ASPEED_PINCTRL_PIN(B11),
@@ -655,8 +1767,13 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(B14),
ASPEED_PINCTRL_PIN(B15),
ASPEED_PINCTRL_PIN(B16),
+ ASPEED_PINCTRL_PIN(B17),
+ ASPEED_PINCTRL_PIN(B18),
+ ASPEED_PINCTRL_PIN(B19),
ASPEED_PINCTRL_PIN(B2),
ASPEED_PINCTRL_PIN(B20),
+ ASPEED_PINCTRL_PIN(B21),
+ ASPEED_PINCTRL_PIN(B22),
ASPEED_PINCTRL_PIN(B3),
ASPEED_PINCTRL_PIN(B4),
ASPEED_PINCTRL_PIN(B5),
@@ -668,62 +1785,210 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(C14),
ASPEED_PINCTRL_PIN(C15),
ASPEED_PINCTRL_PIN(C16),
+ ASPEED_PINCTRL_PIN(C17),
ASPEED_PINCTRL_PIN(C18),
+ ASPEED_PINCTRL_PIN(C19),
ASPEED_PINCTRL_PIN(C2),
ASPEED_PINCTRL_PIN(C20),
+ ASPEED_PINCTRL_PIN(C21),
+ ASPEED_PINCTRL_PIN(C22),
ASPEED_PINCTRL_PIN(C3),
ASPEED_PINCTRL_PIN(C4),
ASPEED_PINCTRL_PIN(C5),
ASPEED_PINCTRL_PIN(D1),
ASPEED_PINCTRL_PIN(D10),
+ ASPEED_PINCTRL_PIN(D13),
+ ASPEED_PINCTRL_PIN(D14),
+ ASPEED_PINCTRL_PIN(D15),
+ ASPEED_PINCTRL_PIN(D16),
+ ASPEED_PINCTRL_PIN(D17),
+ ASPEED_PINCTRL_PIN(D18),
+ ASPEED_PINCTRL_PIN(D19),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D20),
+ ASPEED_PINCTRL_PIN(D21),
+ ASPEED_PINCTRL_PIN(D22),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
ASPEED_PINCTRL_PIN(D7),
ASPEED_PINCTRL_PIN(D8),
ASPEED_PINCTRL_PIN(D9),
+ ASPEED_PINCTRL_PIN(E1),
ASPEED_PINCTRL_PIN(E10),
ASPEED_PINCTRL_PIN(E12),
ASPEED_PINCTRL_PIN(E13),
+ ASPEED_PINCTRL_PIN(E14),
ASPEED_PINCTRL_PIN(E15),
+ ASPEED_PINCTRL_PIN(E16),
+ ASPEED_PINCTRL_PIN(E17),
+ ASPEED_PINCTRL_PIN(E18),
+ ASPEED_PINCTRL_PIN(E19),
+ ASPEED_PINCTRL_PIN(E2),
+ ASPEED_PINCTRL_PIN(E20),
ASPEED_PINCTRL_PIN(E21),
+ ASPEED_PINCTRL_PIN(E22),
+ ASPEED_PINCTRL_PIN(E3),
ASPEED_PINCTRL_PIN(E6),
ASPEED_PINCTRL_PIN(E7),
ASPEED_PINCTRL_PIN(E9),
+ ASPEED_PINCTRL_PIN(F1),
+ ASPEED_PINCTRL_PIN(F17),
+ ASPEED_PINCTRL_PIN(F18),
ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F2),
ASPEED_PINCTRL_PIN(F20),
+ ASPEED_PINCTRL_PIN(F21),
+ ASPEED_PINCTRL_PIN(F22),
+ ASPEED_PINCTRL_PIN(F3),
+ ASPEED_PINCTRL_PIN(F4),
+ ASPEED_PINCTRL_PIN(F5),
ASPEED_PINCTRL_PIN(F9),
+ ASPEED_PINCTRL_PIN(G1),
+ ASPEED_PINCTRL_PIN(G17),
+ ASPEED_PINCTRL_PIN(G18),
+ ASPEED_PINCTRL_PIN(G2),
+ ASPEED_PINCTRL_PIN(G20),
+ ASPEED_PINCTRL_PIN(G21),
+ ASPEED_PINCTRL_PIN(G22),
+ ASPEED_PINCTRL_PIN(G3),
+ ASPEED_PINCTRL_PIN(G4),
+ ASPEED_PINCTRL_PIN(G5),
+ ASPEED_PINCTRL_PIN(H18),
+ ASPEED_PINCTRL_PIN(H19),
ASPEED_PINCTRL_PIN(H20),
+ ASPEED_PINCTRL_PIN(H21),
+ ASPEED_PINCTRL_PIN(H22),
+ ASPEED_PINCTRL_PIN(H3),
+ ASPEED_PINCTRL_PIN(H4),
+ ASPEED_PINCTRL_PIN(H5),
+ ASPEED_PINCTRL_PIN(J18),
+ ASPEED_PINCTRL_PIN(J19),
+ ASPEED_PINCTRL_PIN(J20),
+ ASPEED_PINCTRL_PIN(K18),
+ ASPEED_PINCTRL_PIN(K19),
ASPEED_PINCTRL_PIN(L1),
+ ASPEED_PINCTRL_PIN(L18),
+ ASPEED_PINCTRL_PIN(L19),
ASPEED_PINCTRL_PIN(L2),
ASPEED_PINCTRL_PIN(L3),
ASPEED_PINCTRL_PIN(L4),
+ ASPEED_PINCTRL_PIN(M18),
+ ASPEED_PINCTRL_PIN(M19),
+ ASPEED_PINCTRL_PIN(M20),
ASPEED_PINCTRL_PIN(N1),
+ ASPEED_PINCTRL_PIN(N18),
+ ASPEED_PINCTRL_PIN(N19),
ASPEED_PINCTRL_PIN(N2),
ASPEED_PINCTRL_PIN(N20),
ASPEED_PINCTRL_PIN(N21),
ASPEED_PINCTRL_PIN(N22),
ASPEED_PINCTRL_PIN(N3),
ASPEED_PINCTRL_PIN(N4),
+ ASPEED_PINCTRL_PIN(N5),
ASPEED_PINCTRL_PIN(P1),
+ ASPEED_PINCTRL_PIN(P18),
+ ASPEED_PINCTRL_PIN(P19),
ASPEED_PINCTRL_PIN(P2),
+ ASPEED_PINCTRL_PIN(P20),
+ ASPEED_PINCTRL_PIN(P21),
+ ASPEED_PINCTRL_PIN(P22),
+ ASPEED_PINCTRL_PIN(P3),
+ ASPEED_PINCTRL_PIN(P4),
+ ASPEED_PINCTRL_PIN(P5),
ASPEED_PINCTRL_PIN(R1),
+ ASPEED_PINCTRL_PIN(R18),
+ ASPEED_PINCTRL_PIN(R19),
+ ASPEED_PINCTRL_PIN(R2),
+ ASPEED_PINCTRL_PIN(R20),
+ ASPEED_PINCTRL_PIN(R21),
+ ASPEED_PINCTRL_PIN(R22),
+ ASPEED_PINCTRL_PIN(R3),
+ ASPEED_PINCTRL_PIN(R4),
+ ASPEED_PINCTRL_PIN(R5),
+ ASPEED_PINCTRL_PIN(T1),
+ ASPEED_PINCTRL_PIN(T17),
+ ASPEED_PINCTRL_PIN(T19),
+ ASPEED_PINCTRL_PIN(T2),
+ ASPEED_PINCTRL_PIN(T20),
+ ASPEED_PINCTRL_PIN(T21),
+ ASPEED_PINCTRL_PIN(T22),
+ ASPEED_PINCTRL_PIN(T3),
ASPEED_PINCTRL_PIN(T4),
+ ASPEED_PINCTRL_PIN(T5),
+ ASPEED_PINCTRL_PIN(U1),
+ ASPEED_PINCTRL_PIN(U19),
+ ASPEED_PINCTRL_PIN(U2),
+ ASPEED_PINCTRL_PIN(U20),
+ ASPEED_PINCTRL_PIN(U21),
+ ASPEED_PINCTRL_PIN(U22),
ASPEED_PINCTRL_PIN(U3),
+ ASPEED_PINCTRL_PIN(U4),
+ ASPEED_PINCTRL_PIN(U5),
+ ASPEED_PINCTRL_PIN(V1),
+ ASPEED_PINCTRL_PIN(V19),
ASPEED_PINCTRL_PIN(V2),
+ ASPEED_PINCTRL_PIN(V20),
+ ASPEED_PINCTRL_PIN(V21),
+ ASPEED_PINCTRL_PIN(V22),
ASPEED_PINCTRL_PIN(V3),
+ ASPEED_PINCTRL_PIN(V4),
+ ASPEED_PINCTRL_PIN(V5),
ASPEED_PINCTRL_PIN(V6),
+ ASPEED_PINCTRL_PIN(W1),
+ ASPEED_PINCTRL_PIN(W19),
ASPEED_PINCTRL_PIN(W2),
+ ASPEED_PINCTRL_PIN(W20),
+ ASPEED_PINCTRL_PIN(W21),
+ ASPEED_PINCTRL_PIN(W22),
ASPEED_PINCTRL_PIN(W3),
+ ASPEED_PINCTRL_PIN(W4),
+ ASPEED_PINCTRL_PIN(W5),
+ ASPEED_PINCTRL_PIN(W6),
+ ASPEED_PINCTRL_PIN(Y1),
+ ASPEED_PINCTRL_PIN(Y19),
+ ASPEED_PINCTRL_PIN(Y2),
+ ASPEED_PINCTRL_PIN(Y20),
+ ASPEED_PINCTRL_PIN(Y21),
+ ASPEED_PINCTRL_PIN(Y22),
ASPEED_PINCTRL_PIN(Y3),
+ ASPEED_PINCTRL_PIN(Y4),
+ ASPEED_PINCTRL_PIN(Y5),
+ ASPEED_PINCTRL_PIN(Y6),
};
static const struct aspeed_pin_group aspeed_g5_groups[] = {
+ ASPEED_PINCTRL_GROUP(ACPI),
+ ASPEED_PINCTRL_GROUP(ADC0),
+ ASPEED_PINCTRL_GROUP(ADC1),
+ ASPEED_PINCTRL_GROUP(ADC10),
+ ASPEED_PINCTRL_GROUP(ADC11),
+ ASPEED_PINCTRL_GROUP(ADC12),
+ ASPEED_PINCTRL_GROUP(ADC13),
+ ASPEED_PINCTRL_GROUP(ADC14),
+ ASPEED_PINCTRL_GROUP(ADC15),
+ ASPEED_PINCTRL_GROUP(ADC2),
+ ASPEED_PINCTRL_GROUP(ADC3),
+ ASPEED_PINCTRL_GROUP(ADC4),
+ ASPEED_PINCTRL_GROUP(ADC5),
+ ASPEED_PINCTRL_GROUP(ADC6),
+ ASPEED_PINCTRL_GROUP(ADC7),
+ ASPEED_PINCTRL_GROUP(ADC8),
+ ASPEED_PINCTRL_GROUP(ADC9),
+ ASPEED_PINCTRL_GROUP(BMCINT),
+ ASPEED_PINCTRL_GROUP(DDCCLK),
+ ASPEED_PINCTRL_GROUP(DDCDAT),
+ ASPEED_PINCTRL_GROUP(ESPI),
+ ASPEED_PINCTRL_GROUP(FWSPICS1),
+ ASPEED_PINCTRL_GROUP(FWSPICS2),
ASPEED_PINCTRL_GROUP(GPID0),
ASPEED_PINCTRL_GROUP(GPID2),
+ ASPEED_PINCTRL_GROUP(GPID4),
+ ASPEED_PINCTRL_GROUP(GPID6),
ASPEED_PINCTRL_GROUP(GPIE0),
+ ASPEED_PINCTRL_GROUP(GPIE2),
+ ASPEED_PINCTRL_GROUP(GPIE4),
+ ASPEED_PINCTRL_GROUP(GPIE6),
ASPEED_PINCTRL_GROUP(I2C10),
ASPEED_PINCTRL_GROUP(I2C11),
ASPEED_PINCTRL_GROUP(I2C12),
@@ -736,11 +2001,50 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(I2C7),
ASPEED_PINCTRL_GROUP(I2C8),
ASPEED_PINCTRL_GROUP(I2C9),
+ ASPEED_PINCTRL_GROUP(LAD0),
+ ASPEED_PINCTRL_GROUP(LAD1),
+ ASPEED_PINCTRL_GROUP(LAD2),
+ ASPEED_PINCTRL_GROUP(LAD3),
+ ASPEED_PINCTRL_GROUP(LCLK),
+ ASPEED_PINCTRL_GROUP(LFRAME),
+ ASPEED_PINCTRL_GROUP(LPCHC),
+ ASPEED_PINCTRL_GROUP(LPCPD),
+ ASPEED_PINCTRL_GROUP(LPCPLUS),
+ ASPEED_PINCTRL_GROUP(LPCPME),
+ ASPEED_PINCTRL_GROUP(LPCRST),
+ ASPEED_PINCTRL_GROUP(LPCSMI),
+ ASPEED_PINCTRL_GROUP(LSIRQ),
ASPEED_PINCTRL_GROUP(MAC1LINK),
+ ASPEED_PINCTRL_GROUP(MAC2LINK),
ASPEED_PINCTRL_GROUP(MDIO1),
ASPEED_PINCTRL_GROUP(MDIO2),
+ ASPEED_PINCTRL_GROUP(NCTS1),
+ ASPEED_PINCTRL_GROUP(NCTS2),
+ ASPEED_PINCTRL_GROUP(NCTS3),
+ ASPEED_PINCTRL_GROUP(NCTS4),
+ ASPEED_PINCTRL_GROUP(NDCD1),
+ ASPEED_PINCTRL_GROUP(NDCD2),
+ ASPEED_PINCTRL_GROUP(NDCD3),
+ ASPEED_PINCTRL_GROUP(NDCD4),
+ ASPEED_PINCTRL_GROUP(NDSR1),
+ ASPEED_PINCTRL_GROUP(NDSR2),
+ ASPEED_PINCTRL_GROUP(NDSR3),
+ ASPEED_PINCTRL_GROUP(NDSR4),
+ ASPEED_PINCTRL_GROUP(NDTR1),
+ ASPEED_PINCTRL_GROUP(NDTR2),
+ ASPEED_PINCTRL_GROUP(NDTR3),
+ ASPEED_PINCTRL_GROUP(NDTR4),
+ ASPEED_PINCTRL_GROUP(NRI1),
+ ASPEED_PINCTRL_GROUP(NRI2),
+ ASPEED_PINCTRL_GROUP(NRI3),
+ ASPEED_PINCTRL_GROUP(NRI4),
+ ASPEED_PINCTRL_GROUP(NRTS1),
+ ASPEED_PINCTRL_GROUP(NRTS2),
+ ASPEED_PINCTRL_GROUP(NRTS3),
+ ASPEED_PINCTRL_GROUP(NRTS4),
ASPEED_PINCTRL_GROUP(OSCCLK),
ASPEED_PINCTRL_GROUP(PEWAKE),
+ ASPEED_PINCTRL_GROUP(PNOR),
ASPEED_PINCTRL_GROUP(PWM0),
ASPEED_PINCTRL_GROUP(PWM1),
ASPEED_PINCTRL_GROUP(PWM2),
@@ -753,22 +2057,102 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(RGMII2),
ASPEED_PINCTRL_GROUP(RMII1),
ASPEED_PINCTRL_GROUP(RMII2),
+ ASPEED_PINCTRL_GROUP(RXD1),
+ ASPEED_PINCTRL_GROUP(RXD2),
+ ASPEED_PINCTRL_GROUP(RXD3),
+ ASPEED_PINCTRL_GROUP(RXD4),
+ ASPEED_PINCTRL_GROUP(SALT1),
+ ASPEED_PINCTRL_GROUP(SALT10),
+ ASPEED_PINCTRL_GROUP(SALT11),
+ ASPEED_PINCTRL_GROUP(SALT12),
+ ASPEED_PINCTRL_GROUP(SALT13),
+ ASPEED_PINCTRL_GROUP(SALT14),
+ ASPEED_PINCTRL_GROUP(SALT2),
+ ASPEED_PINCTRL_GROUP(SALT3),
+ ASPEED_PINCTRL_GROUP(SALT4),
+ ASPEED_PINCTRL_GROUP(SALT5),
+ ASPEED_PINCTRL_GROUP(SALT6),
+ ASPEED_PINCTRL_GROUP(SALT7),
+ ASPEED_PINCTRL_GROUP(SALT8),
+ ASPEED_PINCTRL_GROUP(SALT9),
+ ASPEED_PINCTRL_GROUP(SCL1),
+ ASPEED_PINCTRL_GROUP(SCL2),
ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SD2),
+ ASPEED_PINCTRL_GROUP(SDA1),
+ ASPEED_PINCTRL_GROUP(SDA2),
+ ASPEED_PINCTRL_GROUP(SGPS1),
+ ASPEED_PINCTRL_GROUP(SGPS2),
+ ASPEED_PINCTRL_GROUP(SIOONCTRL),
+ ASPEED_PINCTRL_GROUP(SIOPBI),
+ ASPEED_PINCTRL_GROUP(SIOPBO),
+ ASPEED_PINCTRL_GROUP(SIOPWREQ),
+ ASPEED_PINCTRL_GROUP(SIOPWRGD),
+ ASPEED_PINCTRL_GROUP(SIOS3),
+ ASPEED_PINCTRL_GROUP(SIOS5),
+ ASPEED_PINCTRL_GROUP(SIOSCI),
ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1CS1),
ASPEED_PINCTRL_GROUP(SPI1DEBUG),
ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
+ ASPEED_PINCTRL_GROUP(SPI2CK),
+ ASPEED_PINCTRL_GROUP(SPI2CS0),
+ ASPEED_PINCTRL_GROUP(SPI2CS1),
+ ASPEED_PINCTRL_GROUP(SPI2MISO),
+ ASPEED_PINCTRL_GROUP(SPI2MOSI),
+ ASPEED_PINCTRL_GROUP(TIMER3),
ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(TXD1),
+ ASPEED_PINCTRL_GROUP(TXD2),
+ ASPEED_PINCTRL_GROUP(TXD3),
+ ASPEED_PINCTRL_GROUP(TXD4),
+ ASPEED_PINCTRL_GROUP(UART6),
+ ASPEED_PINCTRL_GROUP(USBCKI),
ASPEED_PINCTRL_GROUP(VGABIOSROM),
+ ASPEED_PINCTRL_GROUP(VGAHS),
+ ASPEED_PINCTRL_GROUP(VGAVS),
+ ASPEED_PINCTRL_GROUP(VPI24),
+ ASPEED_PINCTRL_GROUP(VPO),
+ ASPEED_PINCTRL_GROUP(WDTRST1),
+ ASPEED_PINCTRL_GROUP(WDTRST2),
};
static const struct aspeed_pin_function aspeed_g5_functions[] = {
+ ASPEED_PINCTRL_FUNC(ACPI),
+ ASPEED_PINCTRL_FUNC(ADC0),
+ ASPEED_PINCTRL_FUNC(ADC1),
+ ASPEED_PINCTRL_FUNC(ADC10),
+ ASPEED_PINCTRL_FUNC(ADC11),
+ ASPEED_PINCTRL_FUNC(ADC12),
+ ASPEED_PINCTRL_FUNC(ADC13),
+ ASPEED_PINCTRL_FUNC(ADC14),
+ ASPEED_PINCTRL_FUNC(ADC15),
+ ASPEED_PINCTRL_FUNC(ADC2),
+ ASPEED_PINCTRL_FUNC(ADC3),
+ ASPEED_PINCTRL_FUNC(ADC4),
+ ASPEED_PINCTRL_FUNC(ADC5),
+ ASPEED_PINCTRL_FUNC(ADC6),
+ ASPEED_PINCTRL_FUNC(ADC7),
+ ASPEED_PINCTRL_FUNC(ADC8),
+ ASPEED_PINCTRL_FUNC(ADC9),
+ ASPEED_PINCTRL_FUNC(BMCINT),
+ ASPEED_PINCTRL_FUNC(DDCCLK),
+ ASPEED_PINCTRL_FUNC(DDCDAT),
+ ASPEED_PINCTRL_FUNC(ESPI),
+ ASPEED_PINCTRL_FUNC(FWSPICS1),
+ ASPEED_PINCTRL_FUNC(FWSPICS2),
ASPEED_PINCTRL_FUNC(GPID0),
ASPEED_PINCTRL_FUNC(GPID2),
+ ASPEED_PINCTRL_FUNC(GPID4),
+ ASPEED_PINCTRL_FUNC(GPID6),
ASPEED_PINCTRL_FUNC(GPIE0),
+ ASPEED_PINCTRL_FUNC(GPIE2),
+ ASPEED_PINCTRL_FUNC(GPIE4),
+ ASPEED_PINCTRL_FUNC(GPIE6),
ASPEED_PINCTRL_FUNC(I2C10),
ASPEED_PINCTRL_FUNC(I2C11),
ASPEED_PINCTRL_FUNC(I2C12),
@@ -781,11 +2165,50 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(I2C7),
ASPEED_PINCTRL_FUNC(I2C8),
ASPEED_PINCTRL_FUNC(I2C9),
+ ASPEED_PINCTRL_FUNC(LAD0),
+ ASPEED_PINCTRL_FUNC(LAD1),
+ ASPEED_PINCTRL_FUNC(LAD2),
+ ASPEED_PINCTRL_FUNC(LAD3),
+ ASPEED_PINCTRL_FUNC(LCLK),
+ ASPEED_PINCTRL_FUNC(LFRAME),
+ ASPEED_PINCTRL_FUNC(LPCHC),
+ ASPEED_PINCTRL_FUNC(LPCPD),
+ ASPEED_PINCTRL_FUNC(LPCPLUS),
+ ASPEED_PINCTRL_FUNC(LPCPME),
+ ASPEED_PINCTRL_FUNC(LPCRST),
+ ASPEED_PINCTRL_FUNC(LPCSMI),
+ ASPEED_PINCTRL_FUNC(LSIRQ),
ASPEED_PINCTRL_FUNC(MAC1LINK),
+ ASPEED_PINCTRL_FUNC(MAC2LINK),
ASPEED_PINCTRL_FUNC(MDIO1),
ASPEED_PINCTRL_FUNC(MDIO2),
+ ASPEED_PINCTRL_FUNC(NCTS1),
+ ASPEED_PINCTRL_FUNC(NCTS2),
+ ASPEED_PINCTRL_FUNC(NCTS3),
+ ASPEED_PINCTRL_FUNC(NCTS4),
+ ASPEED_PINCTRL_FUNC(NDCD1),
+ ASPEED_PINCTRL_FUNC(NDCD2),
+ ASPEED_PINCTRL_FUNC(NDCD3),
+ ASPEED_PINCTRL_FUNC(NDCD4),
+ ASPEED_PINCTRL_FUNC(NDSR1),
+ ASPEED_PINCTRL_FUNC(NDSR2),
+ ASPEED_PINCTRL_FUNC(NDSR3),
+ ASPEED_PINCTRL_FUNC(NDSR4),
+ ASPEED_PINCTRL_FUNC(NDTR1),
+ ASPEED_PINCTRL_FUNC(NDTR2),
+ ASPEED_PINCTRL_FUNC(NDTR3),
+ ASPEED_PINCTRL_FUNC(NDTR4),
+ ASPEED_PINCTRL_FUNC(NRI1),
+ ASPEED_PINCTRL_FUNC(NRI2),
+ ASPEED_PINCTRL_FUNC(NRI3),
+ ASPEED_PINCTRL_FUNC(NRI4),
+ ASPEED_PINCTRL_FUNC(NRTS1),
+ ASPEED_PINCTRL_FUNC(NRTS2),
+ ASPEED_PINCTRL_FUNC(NRTS3),
+ ASPEED_PINCTRL_FUNC(NRTS4),
ASPEED_PINCTRL_FUNC(OSCCLK),
ASPEED_PINCTRL_FUNC(PEWAKE),
+ ASPEED_PINCTRL_FUNC(PNOR),
ASPEED_PINCTRL_FUNC(PWM0),
ASPEED_PINCTRL_FUNC(PWM1),
ASPEED_PINCTRL_FUNC(PWM2),
@@ -798,16 +2221,68 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(RGMII2),
ASPEED_PINCTRL_FUNC(RMII1),
ASPEED_PINCTRL_FUNC(RMII2),
+ ASPEED_PINCTRL_FUNC(RXD1),
+ ASPEED_PINCTRL_FUNC(RXD2),
+ ASPEED_PINCTRL_FUNC(RXD3),
+ ASPEED_PINCTRL_FUNC(RXD4),
+ ASPEED_PINCTRL_FUNC(SALT1),
+ ASPEED_PINCTRL_FUNC(SALT10),
+ ASPEED_PINCTRL_FUNC(SALT11),
+ ASPEED_PINCTRL_FUNC(SALT12),
+ ASPEED_PINCTRL_FUNC(SALT13),
+ ASPEED_PINCTRL_FUNC(SALT14),
+ ASPEED_PINCTRL_FUNC(SALT2),
+ ASPEED_PINCTRL_FUNC(SALT3),
+ ASPEED_PINCTRL_FUNC(SALT4),
+ ASPEED_PINCTRL_FUNC(SALT5),
+ ASPEED_PINCTRL_FUNC(SALT6),
+ ASPEED_PINCTRL_FUNC(SALT7),
+ ASPEED_PINCTRL_FUNC(SALT8),
+ ASPEED_PINCTRL_FUNC(SALT9),
+ ASPEED_PINCTRL_FUNC(SCL1),
+ ASPEED_PINCTRL_FUNC(SCL2),
ASPEED_PINCTRL_FUNC(SD1),
+ ASPEED_PINCTRL_FUNC(SD2),
+ ASPEED_PINCTRL_FUNC(SDA1),
+ ASPEED_PINCTRL_FUNC(SDA2),
+ ASPEED_PINCTRL_FUNC(SGPS1),
+ ASPEED_PINCTRL_FUNC(SGPS2),
+ ASPEED_PINCTRL_FUNC(SIOONCTRL),
+ ASPEED_PINCTRL_FUNC(SIOPBI),
+ ASPEED_PINCTRL_FUNC(SIOPBO),
+ ASPEED_PINCTRL_FUNC(SIOPWREQ),
+ ASPEED_PINCTRL_FUNC(SIOPWRGD),
+ ASPEED_PINCTRL_FUNC(SIOS3),
+ ASPEED_PINCTRL_FUNC(SIOS5),
+ ASPEED_PINCTRL_FUNC(SIOSCI),
ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1CS1),
ASPEED_PINCTRL_FUNC(SPI1DEBUG),
ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
+ ASPEED_PINCTRL_FUNC(SPI2CK),
+ ASPEED_PINCTRL_FUNC(SPI2CS0),
+ ASPEED_PINCTRL_FUNC(SPI2CS1),
+ ASPEED_PINCTRL_FUNC(SPI2MISO),
+ ASPEED_PINCTRL_FUNC(SPI2MOSI),
+ ASPEED_PINCTRL_FUNC(TIMER3),
ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(TXD1),
+ ASPEED_PINCTRL_FUNC(TXD2),
+ ASPEED_PINCTRL_FUNC(TXD3),
+ ASPEED_PINCTRL_FUNC(TXD4),
+ ASPEED_PINCTRL_FUNC(UART6),
+ ASPEED_PINCTRL_FUNC(USBCKI),
ASPEED_PINCTRL_FUNC(VGABIOSROM),
+ ASPEED_PINCTRL_FUNC(VGAHS),
+ ASPEED_PINCTRL_FUNC(VGAVS),
+ ASPEED_PINCTRL_FUNC(VPI24),
+ ASPEED_PINCTRL_FUNC(VPO),
+ ASPEED_PINCTRL_FUNC(WDTRST1),
+ ASPEED_PINCTRL_FUNC(WDTRST2),
};
static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
@@ -848,10 +2323,35 @@ static struct pinctrl_desc aspeed_g5_pinctrl_desc = {
static int aspeed_g5_pinctrl_probe(struct platform_device *pdev)
{
int i;
+ struct regmap *map;
+ struct device_node *node;
for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++)
aspeed_g5_pins[i].number = i;
+ node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 0);
+ map = syscon_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR(map)) {
+ dev_warn(&pdev->dev, "No GFX phandle found, some mux configurations may fail\n");
+ map = NULL;
+ }
+ aspeed_g5_pinctrl_data.maps[ASPEED_IP_GFX] = map;
+
+ node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 1);
+ if (node) {
+ map = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(map)) {
+ dev_warn(&pdev->dev, "LHC parent is not a syscon, some mux configurations may fail\n");
+ map = NULL;
+ }
+ } else {
+ dev_warn(&pdev->dev, "No LHC phandle found, some mux configurations may fail\n");
+ map = NULL;
+ }
+ of_node_put(node);
+ aspeed_g5_pinctrl_data.maps[ASPEED_IP_LPC] = map;
+
return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc,
&aspeed_g5_pinctrl_data);
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 49aeba912531..76f62bd45f02 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -14,6 +14,12 @@
#include "../core.h"
#include "pinctrl-aspeed.h"
+static const char *const aspeed_pinmux_ips[] = {
+ [ASPEED_IP_SCU] = "SCU",
+ [ASPEED_IP_GFX] = "GFX",
+ [ASPEED_IP_LPC] = "LPC",
+};
+
int aspeed_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -78,7 +84,8 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
static inline void aspeed_sig_desc_print_val(
const struct aspeed_sig_desc *desc, bool enable, u32 rv)
{
- pr_debug("SCU%x[0x%08x]=0x%x, got 0x%x from 0x%08x\n", desc->reg,
+ pr_debug("Want %s%X[0x%08X]=0x%X, got 0x%X from 0x%08X\n",
+ aspeed_pinmux_ips[desc->ip], desc->reg,
desc->mask, enable ? desc->enable : desc->disable,
(rv & desc->mask) >> __ffs(desc->mask), rv);
}
@@ -88,10 +95,11 @@ static inline void aspeed_sig_desc_print_val(
*
* @desc: The signal descriptor of interest
* @enabled: True to query the enabled state, false to query disabled state
- * @regmap: The SCU regmap instance
+ * @regmap: The IP block's regmap instance
*
- * @return True if the descriptor's bitfield is configured to the state
- * selected by @enabled, false otherwise
+ * Return: 1 if the descriptor's bitfield is configured to the state
+ * selected by @enabled, 0 if not, and less than zero if an unrecoverable
+ * failure occurred
*
* Evaluation of descriptor state is non-trivial in that it is not a binary
* outcome: The bitfields can be greater than one bit in size and thus can take
@@ -99,14 +107,19 @@ static inline void aspeed_sig_desc_print_val(
* descriptor (typically this means a different function to the one of interest
* is enabled). Thus we must explicitly test for either condition as required.
*/
-static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
+static int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
bool enabled, struct regmap *map)
{
+ int ret;
unsigned int raw;
u32 want;
- if (regmap_read(map, desc->reg, &raw) < 0)
- return false;
+ if (!map)
+ return -ENODEV;
+
+ ret = regmap_read(map, desc->reg, &raw);
+ if (ret)
+ return ret;
aspeed_sig_desc_print_val(desc, enabled, raw);
want = enabled ? desc->enable : desc->disable;
@@ -119,10 +132,10 @@ static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
*
* @expr: An expression controlling the signal for a mux function on a pin
* @enabled: True to query the enabled state, false to query disabled state
- * @regmap: The SCU regmap instance
+ * @maps: The list of regmap instances
*
- * @return True if the expression composed by @enabled evaluates true, false
- * otherwise
+ * Return: 1 if the expression composed by @enabled evaluates true, 0 if not,
+ * and less than zero if an unrecoverable failure occurred.
*
* A mux function is enabled or disabled if the function's signal expression
* for each pin in the function's pin group evaluates true for the desired
@@ -135,19 +148,21 @@ static bool aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
* neither the enabled nor disabled state. Thus we must explicitly test for
* either condition as required.
*/
-static bool aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
- bool enabled, struct regmap *map)
+static int aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
+ bool enabled, struct regmap * const *maps)
{
int i;
+ int ret;
for (i = 0; i < expr->ndescs; i++) {
const struct aspeed_sig_desc *desc = &expr->descs[i];
- if (!aspeed_sig_desc_eval(desc, enabled, map))
- return false;
+ ret = aspeed_sig_desc_eval(desc, enabled, maps[desc->ip]);
+ if (ret <= 0)
+ return ret;
}
- return true;
+ return 1;
}
/**
@@ -158,19 +173,24 @@ static bool aspeed_sig_expr_eval(const struct aspeed_sig_expr *expr,
* configured
* @enable: true to enable a function's signal through a pin's signal
* expression, false to disable the function's signal
- * @map: The SCU's regmap instance for pinmux register access.
+ * @maps: The list of regmap instances for pinmux register access.
*
- * @return true if the expression is configured as requested, false otherwise
+ * Return: 0 if the expression is configured as requested and a negative error
+ * code otherwise
*/
-static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
- bool enable, struct regmap *map)
+static int aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
+ bool enable, struct regmap * const *maps)
{
+ int ret;
int i;
for (i = 0; i < expr->ndescs; i++) {
- bool ret;
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
+ u32 val = (pattern << __ffs(desc->mask));
+
+ if (!maps[desc->ip])
+ return -ENODEV;
/*
* Strap registers are configured in hardware or by early-boot
@@ -179,64 +199,79 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
* deconfigured and is the reason we re-evaluate after writing
* all descriptor bits.
*/
- if (desc->reg == HW_STRAP1 || desc->reg == HW_STRAP2)
+ if ((desc->reg == HW_STRAP1 || desc->reg == HW_STRAP2) &&
+ desc->ip == ASPEED_IP_SCU)
continue;
- ret = regmap_update_bits(map, desc->reg, desc->mask,
- pattern << __ffs(desc->mask)) == 0;
+ ret = regmap_update_bits(maps[desc->ip], desc->reg,
+ desc->mask, val);
- if (!ret)
+ if (ret)
return ret;
}
- return aspeed_sig_expr_eval(expr, enable, map);
+ ret = aspeed_sig_expr_eval(expr, enable, maps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return -EPERM;
+
+ return 0;
}
-static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
- struct regmap *map)
+static int aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
+ struct regmap * const *maps)
{
- if (aspeed_sig_expr_eval(expr, true, map))
- return true;
+ int ret;
+
+ ret = aspeed_sig_expr_eval(expr, true, maps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return aspeed_sig_expr_set(expr, true, maps);
- return aspeed_sig_expr_set(expr, true, map);
+ return 0;
}
-static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
- struct regmap *map)
+static int aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
+ struct regmap * const *maps)
{
- if (!aspeed_sig_expr_eval(expr, true, map))
- return true;
+ int ret;
+
+ ret = aspeed_sig_expr_eval(expr, true, maps);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return aspeed_sig_expr_set(expr, false, maps);
- return aspeed_sig_expr_set(expr, false, map);
+ return 0;
}
/**
* Disable a signal on a pin by disabling all provided signal expressions.
*
* @exprs: The list of signal expressions (from a priority level on a pin)
- * @map: The SCU's regmap instance for pinmux register access.
+ * @maps: The list of regmap instances for pinmux register access.
*
- * @return true if all expressions in the list are successfully disabled, false
- * otherwise
+ * Return: 0 if all expressions are disabled, otherwise a negative error code
*/
-static bool aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
- struct regmap *map)
+static int aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
+ struct regmap * const *maps)
{
- bool disabled = true;
+ int ret = 0;
if (!exprs)
return true;
- while (*exprs) {
- bool ret;
-
- ret = aspeed_sig_expr_disable(*exprs, map);
- disabled = disabled && ret;
-
+ while (*exprs && !ret) {
+ ret = aspeed_sig_expr_disable(*exprs, maps);
exprs++;
}
- return disabled;
+ return ret;
}
/**
@@ -246,8 +281,8 @@ static bool aspeed_disable_sig(const struct aspeed_sig_expr **exprs,
* @exprs: List of signal expressions (haystack)
* @name: The name of the requested function (needle)
*
- * @return A pointer to the signal expression whose function tag matches the
- * provided name, otherwise NULL.
+ * Return: A pointer to the signal expression whose function tag matches the
+ * provided name, otherwise NULL.
*
*/
static const struct aspeed_sig_expr *aspeed_find_expr_by_name(
@@ -330,6 +365,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
unsigned int group)
{
int i;
+ int ret;
const struct aspeed_pinctrl_data *pdata =
pinctrl_dev_get_drvdata(pctldev);
const struct aspeed_pin_group *pgroup = &pdata->groups[group];
@@ -343,6 +379,8 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
const struct aspeed_sig_expr **funcs;
const struct aspeed_sig_expr ***prios;
+ pr_debug("Muxing pin %d for %s\n", pin, pfunc->name);
+
if (!pdesc)
return -EINVAL;
@@ -358,8 +396,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
if (expr)
break;
- if (!aspeed_disable_sig(funcs, pdata->map))
- return -EPERM;
+ ret = aspeed_disable_sig(funcs, pdata->maps);
+ if (ret)
+ return ret;
prios++;
}
@@ -377,8 +416,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
return -ENXIO;
}
- if (!aspeed_sig_expr_enable(expr, pdata->map))
- return -EPERM;
+ ret = aspeed_sig_expr_enable(expr, pdata->maps);
+ if (ret)
+ return ret;
}
return 0;
@@ -414,6 +454,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
+ int ret;
const struct aspeed_pinctrl_data *pdata =
pinctrl_dev_get_drvdata(pctldev);
const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data;
@@ -432,8 +473,9 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
if (aspeed_gpio_in_exprs(funcs))
break;
- if (!aspeed_disable_sig(funcs, pdata->map))
- return -EPERM;
+ ret = aspeed_disable_sig(funcs, pdata->maps);
+ if (ret)
+ return ret;
prios++;
}
@@ -462,10 +504,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
* If GPIO is not the lowest priority signal type, assume there is only
* one expression defined to enable the GPIO function
*/
- if (!aspeed_sig_expr_enable(expr, pdata->map))
- return -EPERM;
-
- return 0;
+ return aspeed_sig_expr_enable(expr, pdata->maps);
}
int aspeed_pinctrl_probe(struct platform_device *pdev,
@@ -481,10 +520,10 @@ int aspeed_pinctrl_probe(struct platform_device *pdev,
return -ENODEV;
}
- pdata->map = syscon_node_to_regmap(parent->of_node);
- if (IS_ERR(pdata->map)) {
+ pdata->maps[ASPEED_IP_SCU] = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(pdata->maps[ASPEED_IP_SCU])) {
dev_err(&pdev->dev, "No regmap for syscon pincontroller parent\n");
- return PTR_ERR(pdata->map);
+ return PTR_ERR(pdata->maps[ASPEED_IP_SCU]);
}
pctl = pinctrl_register(pdesc, &pdev->dev, pdata);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index 3e72ef8c54bf..08a10d4db229 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -232,6 +232,11 @@
* group.
*/
+#define ASPEED_IP_SCU 0
+#define ASPEED_IP_GFX 1
+#define ASPEED_IP_LPC 2
+#define ASPEED_NR_PINMUX_IPS 3
+
/*
* The "Multi-function Pins Mapping and Control" table in the SoC datasheet
* references registers by the device/offset mnemonic. The register macros
@@ -255,13 +260,16 @@
#define SCUA0 0xA0 /* Multi-function Pin Control #7 */
#define SCUA4 0xA4 /* Multi-function Pin Control #8 */
#define SCUA8 0xA8 /* Multi-function Pin Control #9 */
+#define SCUAC 0xAC /* Multi-function Pin Control #10 */
#define HW_STRAP2 0xD0 /* Strapping */
/**
* A signal descriptor, which describes the register, bits and the
* enable/disable values that should be compared or written.
*
- * @reg: The register offset from base in bytes
+ * @ip: The IP block identifier, used as an index into the regmap array in
+ * struct aspeed_pinctrl_data
+ * @reg: The register offset with respect to the base address of the IP block
* @mask: The mask to apply to the register. The lowest set bit of the mask is
* used to derive the shift value.
* @enable: The value that enables the function. Value should be in the LSBs,
@@ -270,6 +278,7 @@
* LSBs, not at the position of the mask.
*/
struct aspeed_sig_desc {
+ unsigned int ip;
unsigned int reg;
u32 mask;
u32 enable;
@@ -313,24 +322,30 @@ struct aspeed_pin_desc {
/* Macro hell */
+#define SIG_DESC_IP_BIT(ip, reg, idx, val) \
+ { ip, reg, BIT_MASK(idx), val, (((val) + 1) & 1) }
+
/**
- * Short-hand macro for describing a configuration enabled by the state of one
- * bit. The disable value is derived.
+ * Short-hand macro for describing an SCU descriptor enabled by the state of
+ * one bit. The disable value is derived.
*
* @reg: The signal's associated register, offset from base
* @idx: The signal's bit index in the register
* @val: The value (0 or 1) that enables the function
*/
#define SIG_DESC_BIT(reg, idx, val) \
- { reg, BIT_MASK(idx), val, (((val) + 1) & 1) }
+ SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, val)
+
+#define SIG_DESC_IP_SET(ip, reg, idx) SIG_DESC_IP_BIT(ip, reg, idx, 1)
/**
- * A further short-hand macro describing a configuration enabled with a set bit.
+ * A further short-hand macro expanding to an SCU descriptor enabled by a set
+ * bit.
*
- * @reg: The configuration's associated register, offset from base
- * @idx: The configuration's bit index in the register
+ * @reg: The register, offset from base
+ * @idx: The bit index in the register
*/
-#define SIG_DESC_SET(reg, idx) SIG_DESC_BIT(reg, idx, 1)
+#define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
#define SIG_DESC_LIST_SYM(sig, func) sig_descs_ ## sig ## _ ## func
#define SIG_DESC_LIST_DECL(sig, func, ...) \
@@ -500,7 +515,7 @@ struct aspeed_pin_desc {
MS_PIN_DECL_(pin, SIG_EXPR_LIST_PTR(gpio))
struct aspeed_pinctrl_data {
- struct regmap *map;
+ struct regmap *maps[ASPEED_NR_PINMUX_IPS];
const struct pinctrl_pin_desc *pins;
const unsigned int npins;
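Editor's note: the new descriptor helpers differ from the old ones only by the leading IP index, so existing SCU tables keep working unchanged. A hedged sketch of the expansion, with the register offset 0x60 and bit index 2 invented for illustration while the macro and field order come from the definitions above:

/* An IP-aware descriptor for a GFX-owned mux bit ... */
static const struct aspeed_sig_desc example_desc =
        SIG_DESC_IP_SET(ASPEED_IP_GFX, 0x60, 2);

/* ... is equivalent to spelling out the initialiser by hand: */
static const struct aspeed_sig_desc example_desc_expanded =
        { ASPEED_IP_GFX, 0x60, BIT_MASK(2), 1, 0 };

The unchanged SIG_DESC_SET(reg, idx) now produces the same structure with ASPEED_IP_SCU in the first slot, which is why none of the existing pin tables need editing.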
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index a5331fdfc795..810a81786f62 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1106,7 +1106,7 @@ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
int i;
enum pin_config_param param;
- u16 arg;
+ u32 arg;
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
@@ -1222,7 +1222,7 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
int i, j;
enum pin_config_param param;
- u16 arg;
+ u32 arg;
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
@@ -1292,7 +1292,7 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
int i;
enum pin_config_param param;
- u16 arg;
+ u32 arg;
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 5d1e505c3c63..3ca925dfefd1 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -619,7 +619,7 @@ static int iproc_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
{
struct iproc_gpio *chip = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
unsigned i, gpio = iproc_pin_to_gpio(pin);
int ret = -ENOTSUPP;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 13a4c2774157..4b5cf0e0f16e 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -703,7 +703,7 @@ static int ns2_pin_get_enable(struct pinctrl_dev *pctrldev, unsigned int pin)
}
static int ns2_pin_set_slew(struct pinctrl_dev *pctrldev, unsigned int pin,
- u16 slew)
+ u32 slew)
{
struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
@@ -793,7 +793,7 @@ static void ns2_pin_get_pull(struct pinctrl_dev *pctrldev,
}
static int ns2_pin_set_strength(struct pinctrl_dev *pctrldev, unsigned int pin,
- u16 strength)
+ u32 strength)
{
struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
@@ -904,7 +904,7 @@ static int ns2_pin_config_set(struct pinctrl_dev *pctrldev, unsigned int pin,
struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
enum pin_config_param param;
unsigned int i;
- u16 arg;
+ u32 arg;
int ret = -ENOTSUPP;
if (pin_data->pin_conf.base == -1)
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index c8deb8be1da7..91ea32dc1e7f 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -366,7 +366,7 @@ static const struct pinctrl_ops nsp_pctrl_ops = {
.dt_free_map = pinctrl_utils_free_map,
};
-static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u16 slew)
+static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u32 slew)
{
if (slew)
nsp_set_bit(chip, IO_CTRL, NSP_GPIO_SLEW_RATE_EN, gpio, true);
@@ -403,7 +403,7 @@ static void nsp_gpio_get_pull(struct nsp_gpio *chip, unsigned gpio,
}
static int nsp_gpio_set_strength(struct nsp_gpio *chip, unsigned gpio,
- u16 strength)
+ u32 strength)
{
u32 offset, shift, i;
u32 val;
@@ -522,7 +522,7 @@ static int nsp_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
{
struct nsp_gpio *chip = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
unsigned int i, gpio;
int ret = -ENOTSUPP;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index fb38e208f32d..d69046537b75 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -237,10 +237,8 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
}
pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
- if (pindesc == NULL) {
- dev_err(pctldev->dev, "failed to alloc struct pin_desc\n");
+ if (!pindesc)
return -ENOMEM;
- }
/* Set owner */
pindesc->pctldev = pctldev;
@@ -540,6 +538,182 @@ void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
}
EXPORT_SYMBOL_GPL(pinctrl_remove_gpio_range);
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+
+/**
+ * pinctrl_generic_get_group_count() - returns the number of pin groups
+ * @pctldev: pin controller device
+ */
+int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev)
+{
+ return pctldev->num_groups;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_count);
+
+/**
+ * pinctrl_generic_get_group_name() - returns the name of a pin group
+ * @pctldev: pin controller device
+ * @selector: group number
+ */
+const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group)
+ return NULL;
+
+ return group->name;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_name);
+
+/**
+ * pinctrl_generic_get_group_pins() - gets the pin group pins
+ * @pctldev: pin controller device
+ * @selector: group number
+ * @pins: pins in the group
+ * @num_pins: number of pins in the group
+ */
+int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group) {
+ dev_err(pctldev->dev, "%s could not find pingroup%i\n",
+ __func__, selector);
+ return -EINVAL;
+ }
+
+ *pins = group->pins;
+ *num_pins = group->num_pins;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_pins);
+
+/**
+ * pinctrl_generic_get_group() - returns a pin group based on the number
+ * @pctldev: pin controller device
+ * @selector: group number
+ */
+struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group)
+ return NULL;
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_get_group);
+
+/**
+ * pinctrl_generic_add_group() - adds a new pin group
+ * @pctldev: pin controller device
+ * @name: name of the pin group
+ * @pins: pins in the pin group
+ * @num_pins: number of pins in the pin group
+ * @data: pin controller driver specific data
+ *
+ * Note that the caller must take care of locking.
+ */
+int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
+ int *pins, int num_pins, void *data)
+{
+ struct group_desc *group;
+
+ group = devm_kzalloc(pctldev->dev, sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ group->name = name;
+ group->pins = pins;
+ group->num_pins = num_pins;
+ group->data = data;
+
+ radix_tree_insert(&pctldev->pin_group_tree, pctldev->num_groups,
+ group);
+
+ pctldev->num_groups++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_add_group);
+
+/**
+ * pinctrl_generic_remove_group() - removes a numbered pin group
+ * @pctldev: pin controller device
+ * @selector: group number
+ *
+ * Note that the caller must take care of locking.
+ */
+int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct group_desc *group;
+
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ selector);
+ if (!group)
+ return -ENOENT;
+
+ radix_tree_delete(&pctldev->pin_group_tree, selector);
+ devm_kfree(pctldev->dev, group);
+
+ pctldev->num_groups--;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
+
+/**
+ * pinctrl_generic_free_groups() - removes all pin groups
+ * @pctldev: pin controller device
+ *
+ * Note that the caller must take care of locking.
+ */
+static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
+{
+ struct radix_tree_iter iter;
+ struct group_desc *group;
+ unsigned long *indices;
+ void **slot;
+ int i = 0;
+
+ indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
+ pctldev->num_groups, GFP_KERNEL);
+ if (!indices)
+ return;
+
+ radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
+ indices[i++] = iter.index;
+
+ for (i = 0; i < pctldev->num_groups; i++) {
+ group = radix_tree_lookup(&pctldev->pin_group_tree,
+ indices[i]);
+ radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
+ devm_kfree(pctldev->dev, group);
+ }
+
+ pctldev->num_groups = 0;
+}
+
+#else
+static inline void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
+{
+}
+#endif /* CONFIG_GENERIC_PINCTRL_GROUPS */
+
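Editor's note: these helpers give drivers built with CONFIG_GENERIC_PINCTRL_GROUPS a stock implementation of the group-related pinctrl_ops callbacks plus radix-tree backed storage. A hedged sketch of how a converted driver might wire them up; the foo_* names and pin numbers are illustrative and the pinctrl_generic_* declarations live in drivers/pinctrl/core.h:

static const struct pinctrl_ops foo_pctlops = {
        .get_groups_count = pinctrl_generic_get_group_count,
        .get_group_name = pinctrl_generic_get_group_name,
        .get_group_pins = pinctrl_generic_get_group_pins,
};

/* Called once per group, typically while parsing driver data at probe time */
static int foo_register_uart_group(struct pinctrl_dev *pctldev)
{
        static int uart0_pins[] = { 10, 11 };

        return pinctrl_generic_add_group(pctldev, "uart0_grp", uart0_pins,
                                         ARRAY_SIZE(uart0_pins), NULL);
}

The caller is expected to handle locking, and groups added this way are freed automatically by pinctrl_generic_free_groups() on unregister.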
/**
* pinctrl_get_group_selector() - returns the group selector for a group
* @pctldev: the pin controller handling the group
@@ -688,6 +862,35 @@ int pinctrl_gpio_direction_output(unsigned gpio)
}
EXPORT_SYMBOL_GPL(pinctrl_gpio_direction_output);
+/**
+ * pinctrl_gpio_set_config() - Apply config to given GPIO pin
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @config: the configuration to apply to the GPIO
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers that
+ * need to call the underlying pin controller to change GPIO config (for
+ * example to set debounce time).
+ */
+int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+{
+ unsigned long configs[] = { config };
+ struct pinctrl_gpio_range *range;
+ struct pinctrl_dev *pctldev;
+ int ret, pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return ret;
+
+ mutex_lock(&pctldev->mutex);
+ pin = gpio_to_pin(range, gpio);
+ ret = pinconf_set_config(pctldev, pin, configs, ARRAY_SIZE(configs));
+ mutex_unlock(&pctldev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinctrl_gpio_set_config);
+
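Editor's note: pinctrl_gpio_set_config() takes a packed generic config, so a GPIO driver with no debounce hardware of its own can simply forward the request to the backing pin controller. A minimal sketch under that assumption; the my_gpio_* naming and the idea of forwarding debounce are illustrative, while pinctrl_gpio_set_config() and the pinconf packing helpers are the kernel interfaces used above:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>

static int my_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
                                unsigned int debounce_us)
{
        unsigned long config;

        /* Pack PIN_CONFIG_INPUT_DEBOUNCE with its argument ... */
        config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
                                          debounce_us);

        /* ... and hand it to the pin controller behind this GPIO */
        return pinctrl_gpio_set_config(chip->base + offset, config);
}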
static struct pinctrl_state *find_state(struct pinctrl *p,
const char *name)
{
@@ -706,11 +909,8 @@ static struct pinctrl_state *create_state(struct pinctrl *p,
struct pinctrl_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state == NULL) {
- dev_err(p->dev,
- "failed to alloc struct pinctrl_state\n");
+ if (!state)
return ERR_PTR(-ENOMEM);
- }
state->name = name;
INIT_LIST_HEAD(&state->settings);
@@ -720,7 +920,8 @@ static struct pinctrl_state *create_state(struct pinctrl *p,
return state;
}
-static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
+static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
+ struct pinctrl_map const *map)
{
struct pinctrl_state *state;
struct pinctrl_setting *setting;
@@ -736,15 +937,16 @@ static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
return 0;
setting = kzalloc(sizeof(*setting), GFP_KERNEL);
- if (setting == NULL) {
- dev_err(p->dev,
- "failed to alloc struct pinctrl_setting\n");
+ if (!setting)
return -ENOMEM;
- }
setting->type = map->type;
- setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+ if (pctldev)
+ setting->pctldev = pctldev;
+ else
+ setting->pctldev =
+ get_pinctrl_dev_from_devname(map->ctrl_dev_name);
if (setting->pctldev == NULL) {
kfree(setting);
/* Do not defer probing of hogs (circular loop) */
@@ -800,7 +1002,8 @@ static struct pinctrl *find_pinctrl(struct device *dev)
static void pinctrl_free(struct pinctrl *p, bool inlist);
-static struct pinctrl *create_pinctrl(struct device *dev)
+static struct pinctrl *create_pinctrl(struct device *dev,
+ struct pinctrl_dev *pctldev)
{
struct pinctrl *p;
const char *devname;
@@ -815,15 +1018,13 @@ static struct pinctrl *create_pinctrl(struct device *dev)
* a pin control handle with pinctrl_get()
*/
p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL) {
- dev_err(dev, "failed to alloc struct pinctrl\n");
+ if (!p)
return ERR_PTR(-ENOMEM);
- }
p->dev = dev;
INIT_LIST_HEAD(&p->states);
INIT_LIST_HEAD(&p->dt_maps);
- ret = pinctrl_dt_to_map(p);
+ ret = pinctrl_dt_to_map(p, pctldev);
if (ret < 0) {
kfree(p);
return ERR_PTR(ret);
@@ -838,7 +1039,7 @@ static struct pinctrl *create_pinctrl(struct device *dev)
if (strcmp(map->dev_name, devname))
continue;
- ret = add_setting(p, map);
+ ret = add_setting(p, pctldev, map);
/*
* At this point the adding of a setting may:
*
@@ -899,7 +1100,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
return p;
}
- return create_pinctrl(dev);
+ return create_pinctrl(dev, NULL);
}
EXPORT_SYMBOL_GPL(pinctrl_get);
@@ -1175,10 +1376,8 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
}
maps_node = kzalloc(sizeof(*maps_node), GFP_KERNEL);
- if (!maps_node) {
- pr_err("failed to alloc struct pinctrl_maps\n");
+ if (!maps_node)
return -ENOMEM;
- }
maps_node->num_maps = num_maps;
if (dup) {
@@ -1731,20 +1930,18 @@ static int pinctrl_check_ops(struct pinctrl_dev *pctldev)
!ops->get_group_name)
return -EINVAL;
- if (ops->dt_node_to_map && !ops->dt_free_map)
- return -EINVAL;
-
return 0;
}
/**
- * pinctrl_register() - register a pin controller device
+ * pinctrl_init_controller() - init a pin controller device
* @pctldesc: descriptor for this pin controller
* @dev: parent device for this pin controller
* @driver_data: private pin controller data for this pin controller
*/
-struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
- struct device *dev, void *driver_data)
+struct pinctrl_dev *pinctrl_init_controller(struct pinctrl_desc *pctldesc,
+ struct device *dev,
+ void *driver_data)
{
struct pinctrl_dev *pctldev;
int ret;
@@ -1755,17 +1952,22 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
return ERR_PTR(-EINVAL);
pctldev = kzalloc(sizeof(*pctldev), GFP_KERNEL);
- if (pctldev == NULL) {
- dev_err(dev, "failed to alloc struct pinctrl_dev\n");
+ if (!pctldev)
return ERR_PTR(-ENOMEM);
- }
/* Initialize pin control device struct */
pctldev->owner = pctldesc->owner;
pctldev->desc = pctldesc;
pctldev->driver_data = driver_data;
INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+ INIT_RADIX_TREE(&pctldev->pin_group_tree, GFP_KERNEL);
+#endif
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+ INIT_RADIX_TREE(&pctldev->pin_function_tree, GFP_KERNEL);
+#endif
INIT_LIST_HEAD(&pctldev->gpio_ranges);
+ INIT_LIST_HEAD(&pctldev->node);
pctldev->dev = dev;
mutex_init(&pctldev->mutex);
@@ -1800,21 +2002,28 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
goto out_err;
}
- mutex_lock(&pinctrldev_list_mutex);
- list_add_tail(&pctldev->node, &pinctrldev_list);
- mutex_unlock(&pinctrldev_list_mutex);
+ return pctldev;
- pctldev->p = pinctrl_get(pctldev->dev);
+out_err:
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+ return ERR_PTR(ret);
+}
+static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
+{
+ pctldev->p = create_pinctrl(pctldev->dev, pctldev);
if (!IS_ERR(pctldev->p)) {
+ kref_get(&pctldev->p->users);
pctldev->hog_default =
pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
if (IS_ERR(pctldev->hog_default)) {
- dev_dbg(dev, "failed to lookup the default state\n");
+ dev_dbg(pctldev->dev,
+ "failed to lookup the default state\n");
} else {
if (pinctrl_select_state(pctldev->p,
pctldev->hog_default))
- dev_err(dev,
+ dev_err(pctldev->dev,
"failed to select default state\n");
}
@@ -1822,20 +2031,85 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pinctrl_lookup_state(pctldev->p,
PINCTRL_STATE_SLEEP);
if (IS_ERR(pctldev->hog_sleep))
- dev_dbg(dev, "failed to lookup the sleep state\n");
+ dev_dbg(pctldev->dev,
+ "failed to lookup the sleep state\n");
}
+ mutex_lock(&pinctrldev_list_mutex);
+ list_add_tail(&pctldev->node, &pinctrldev_list);
+ mutex_unlock(&pinctrldev_list_mutex);
+
pinctrl_init_device_debugfs(pctldev);
+ return 0;
+}
+
+/**
+ * pinctrl_register() - register a pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ *
+ * Note that pinctrl_register() is known to have problems as the pin
+ * controller driver functions are called before the driver has a
+ * struct pinctrl_dev handle. To avoid issues later on, please use the
+ * new pinctrl_register_and_init() below instead.
+ */
+struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data)
+{
+ struct pinctrl_dev *pctldev;
+ int error;
+
+ pctldev = pinctrl_init_controller(pctldesc, dev, driver_data);
+ if (IS_ERR(pctldev))
+ return pctldev;
+
+ error = pinctrl_create_and_start(pctldev);
+ if (error) {
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+
+ return ERR_PTR(error);
+ }
+
return pctldev;
-out_err:
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pinctrl_register);
+int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data,
+ struct pinctrl_dev **pctldev)
+{
+ struct pinctrl_dev *p;
+ int error;
+
+ p = pinctrl_init_controller(pctldesc, dev, driver_data);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ /*
+ * We have pinctrl_create_and_start() call functions in the pin controller
+ * driver via create_pinctrl(), for at least dt_node_to_map(). So
+ * let's make sure pctldev is properly initialized for the
+ * pin controller driver before we do anything.
+ */
+ *pctldev = p;
+
+ error = pinctrl_create_and_start(p);
+ if (error) {
+ mutex_destroy(&p->mutex);
+ kfree(p);
+ *pctldev = NULL;
+
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
+
/**
* pinctrl_unregister() - unregister pinmux
* @pctldev: pin controller to unregister
@@ -1845,6 +2119,7 @@ EXPORT_SYMBOL_GPL(pinctrl_register);
void pinctrl_unregister(struct pinctrl_dev *pctldev)
{
struct pinctrl_gpio_range *range, *n;
+
if (pctldev == NULL)
return;
@@ -1852,13 +2127,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
pinctrl_remove_device_debugfs(pctldev);
mutex_unlock(&pctldev->mutex);
- if (!IS_ERR(pctldev->p))
+ if (!IS_ERR_OR_NULL(pctldev->p))
pinctrl_put(pctldev->p);
mutex_lock(&pinctrldev_list_mutex);
mutex_lock(&pctldev->mutex);
/* TODO: check that no pinmuxes are still active? */
list_del(&pctldev->node);
+ pinmux_generic_free_functions(pctldev);
+ pinctrl_generic_free_groups(pctldev);
/* Destroy descriptor tree */
pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
pctldev->desc->npins);
@@ -1925,6 +2202,42 @@ struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
EXPORT_SYMBOL_GPL(devm_pinctrl_register);
/**
+ * devm_pinctrl_register_and_init() - Resource managed pinctrl register and init
+ * @dev: parent device for this pin controller
+ * @pctldesc: descriptor for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ * @pctldev: pin controller device to be filled in on success
+ *
+ * Returns zero on success, or a negative error number on failure; on
+ * success the newly registered pin controller is returned through @pctldev.
+ *
+ * The pinctrl device will be automatically released when the device is unbound.
+ */
+int devm_pinctrl_register_and_init(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data,
+ struct pinctrl_dev **pctldev)
+{
+ struct pinctrl_dev **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pinctrl_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pinctrl_register_and_init(pctldesc, dev, driver_data, pctldev);
+ if (error) {
+ devres_free(ptr);
+ return error;
+ }
+
+ *ptr = *pctldev;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_register_and_init);
+
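Editor's note: a hedged sketch of the intended call pattern from a driver probe; the foo_* names are illustrative and foo_pinctrl_desc is assumed to be filled in elsewhere. The point of the two-step helper is that the controller handle is written through the output parameter before any pctlops callback can run, and teardown is handled by devres:

struct foo_pinctrl {
        struct pinctrl_dev *pctldev;
        /* driver private state would follow */
};

static struct pinctrl_desc foo_pinctrl_desc;    /* pins/ops set up elsewhere */

static int foo_pinctrl_probe(struct platform_device *pdev)
{
        struct foo_pinctrl *priv;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);

        /* priv->pctldev is valid before the core starts calling back */
        ret = devm_pinctrl_register_and_init(&pdev->dev, &foo_pinctrl_desc,
                                             priv, &priv->pctldev);
        if (ret)
                dev_err(&pdev->dev, "could not register pin controller\n");

        return ret;
}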
+/**
* devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
* @dev: device for which the resource was allocated
* @pctldev: the pinctrl device to unregister.
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 747c423c11f3..1c35de59a658 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -24,6 +24,10 @@ struct pinctrl_gpio_range;
* controller
* @pin_desc_tree: each pin descriptor for this pin controller is stored in
* this radix tree
+ * @pin_group_tree: optionally each pin group can be stored in this radix tree
+ * @num_groups: optionally number of groups can be kept here
+ * @pin_function_tree: optionally each function can be stored in this radix tree
+ * @num_functions: optionally number of functions can be kept here
* @gpio_ranges: a list of GPIO ranges that is handled by this pin controller,
* ranges are added to this list at runtime
* @dev: the device entry for this pin controller
@@ -40,6 +44,14 @@ struct pinctrl_dev {
struct list_head node;
struct pinctrl_desc *desc;
struct radix_tree_root pin_desc_tree;
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+ struct radix_tree_root pin_group_tree;
+ unsigned int num_groups;
+#endif
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+ struct radix_tree_root pin_function_tree;
+ unsigned int num_functions;
+#endif
struct list_head gpio_ranges;
struct device *dev;
struct module *owner;
@@ -171,6 +183,49 @@ struct pinctrl_maps {
unsigned num_maps;
};
+#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+
+/**
+ * struct group_desc - generic pin group descriptor
+ * @name: name of the pin group
+ * @pins: array of pins that belong to the group
+ * @num_pins: number of pins in the group
+ * @data: pin controller driver specific data
+ */
+struct group_desc {
+ const char *name;
+ int *pins;
+ int num_pins;
+ void *data;
+};
+
+int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev);
+
+const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group_selector);
+
+int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group_selector,
+ const unsigned int **pins,
+ unsigned int *npins);
+
+struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
+ unsigned int group_selector);
+
+int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
+ int *gpins, int ngpins, void *data);
+
+int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
+ unsigned int group_selector);
+
+static inline int
+pinctrl_generic_remove_last_group(struct pinctrl_dev *pctldev)
+{
+ return pinctrl_generic_remove_group(pctldev, pctldev->num_groups - 1);
+}
+
+#endif /* CONFIG_GENERIC_PINCTRL_GROUPS */
+
struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np);
int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
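As a rough sketch of how the new generic group helpers are meant to be used (hypothetical pin numbers and group name; assumes the code lives inside drivers/pinctrl so that core.h is visible):

	#include <linux/kernel.h>
	#include "core.h"

	static int foo_add_uart_group(struct pinctrl_dev *pctldev)
	{
		static int uart_pins[] = { 10, 11 };	/* hypothetical pins */
		struct group_desc *grp;
		int ret;

		ret = pinctrl_generic_add_group(pctldev, "uart0_grp", uart_pins,
						ARRAY_SIZE(uart_pins), NULL);
		if (ret)
			return ret;

		/* The group just added sits at the last selector */
		grp = pinctrl_generic_get_group(pctldev, pctldev->num_groups - 1);
		if (!grp)
			return -EINVAL;

		return 0;
	}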
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 260908480075..0e5c9f14a706 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -42,7 +42,8 @@ static void dt_free_map(struct pinctrl_dev *pctldev,
{
if (pctldev) {
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- ops->dt_free_map(pctldev, map, num_maps);
+ if (ops->dt_free_map)
+ ops->dt_free_map(pctldev, map, num_maps);
} else {
/* There is no pctldev for PIN_MAP_TYPE_DUMMY_STATE */
kfree(map);
@@ -100,11 +101,12 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
return get_pinctrl_dev_from_of_node(np);
}
-static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
+static int dt_to_map_one_config(struct pinctrl *p,
+ struct pinctrl_dev *pctldev,
+ const char *statename,
struct device_node *np_config)
{
struct device_node *np_pctldev;
- struct pinctrl_dev *pctldev;
const struct pinctrl_ops *ops;
int ret;
struct pinctrl_map *map;
@@ -121,7 +123,8 @@ static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
/* OK let's just assume this will appear later then */
return -EPROBE_DEFER;
}
- pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
+ if (!pctldev)
+ pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
if (pctldev)
break;
/* Do not defer probing of hogs (circular loop) */
@@ -166,7 +169,22 @@ static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
return dt_remember_or_free_map(p, statename, NULL, map, 1);
}
-int pinctrl_dt_to_map(struct pinctrl *p)
+bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
+{
+ struct device_node *np;
+ struct property *prop;
+ int size;
+
+ np = pctldev->dev->of_node;
+ if (!np)
+ return false;
+
+ prop = of_find_property(np, "pinctrl-0", &size);
+
+ return prop ? true : false;
+}
+
+int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
{
struct device_node *np = p->dev->of_node;
int state, ret;
@@ -233,7 +251,8 @@ int pinctrl_dt_to_map(struct pinctrl *p)
}
/* Parse the node */
- ret = dt_to_map_one_config(p, statename, np_config);
+ ret = dt_to_map_one_config(p, pctldev, statename,
+ np_config);
of_node_put(np_config);
if (ret < 0)
goto err;
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index c2d1a5505850..43d8d19aa5ee 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -20,8 +20,10 @@ struct of_phandle_args;
#ifdef CONFIG_OF
+bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev);
+
void pinctrl_dt_free_maps(struct pinctrl *p);
-int pinctrl_dt_to_map(struct pinctrl *p);
+int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
int pinctrl_count_index_with_args(const struct device_node *np,
const char *list_name);
@@ -32,7 +34,13 @@ int pinctrl_parse_index_with_args(const struct device_node *np,
#else
-static inline int pinctrl_dt_to_map(struct pinctrl *p)
+static inline bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
+{
+ return false;
+}
+
+static inline int pinctrl_dt_to_map(struct pinctrl *p,
+ struct pinctrl_dev *pctldev)
{
return 0;
}
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index fc8cbf611723..cae05e76c111 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -1,6 +1,7 @@
config PINCTRL_IMX
bool
- select PINMUX
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
select PINCONF
select REGMAP
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 5ef7e875b50e..a7ace9e1ad81 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -27,6 +27,7 @@
#include <linux/regmap.h>
#include "../core.h"
+#include "../pinmux.h"
#include "pinctrl-imx.h"
/* The bits in CONFIG cell defined in binding doc*/
@@ -42,59 +43,25 @@ struct imx_pinctrl {
struct pinctrl_dev *pctl;
void __iomem *base;
void __iomem *input_sel_base;
- const struct imx_pinctrl_soc_info *info;
+ struct imx_pinctrl_soc_info *info;
};
-static inline const struct imx_pin_group *imx_pinctrl_find_group_by_name(
- const struct imx_pinctrl_soc_info *info,
+static inline const struct group_desc *imx_pinctrl_find_group_by_name(
+ struct pinctrl_dev *pctldev,
const char *name)
{
- const struct imx_pin_group *grp = NULL;
+ const struct group_desc *grp = NULL;
int i;
- for (i = 0; i < info->ngroups; i++) {
- if (!strcmp(info->groups[i].name, name)) {
- grp = &info->groups[i];
+ for (i = 0; i < pctldev->num_groups; i++) {
+ grp = pinctrl_generic_get_group(pctldev, i);
+ if (grp && !strcmp(grp->name, name))
break;
- }
}
return grp;
}
-static int imx_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->ngroups;
-}
-
-static const char *imx_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->groups[selector].name;
-}
-
-static int imx_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *npins)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- if (selector >= info->ngroups)
- return -EINVAL;
-
- *pins = info->groups[selector].pin_ids;
- *npins = info->groups[selector].npins;
-
- return 0;
-}
-
static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned offset)
{
@@ -106,8 +73,8 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map, unsigned *num_maps)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
- const struct imx_pin_group *grp;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
+ const struct group_desc *grp;
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num = 1;
@@ -117,15 +84,17 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
* first find the group of this node and check if we need create
* config maps for pins
*/
- grp = imx_pinctrl_find_group_by_name(info, np->name);
+ grp = imx_pinctrl_find_group_by_name(pctldev, np->name);
if (!grp) {
dev_err(info->dev, "unable to find group for node %s\n",
np->name);
return -EINVAL;
}
- for (i = 0; i < grp->npins; i++) {
- if (!(grp->pins[i].config & IMX_NO_PAD_CTL))
+ for (i = 0; i < grp->num_pins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
+ if (!(pin->config & IMX_NO_PAD_CTL))
map_num++;
}
@@ -149,12 +118,14 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create config map */
new_map++;
- for (i = j = 0; i < grp->npins; i++) {
- if (!(grp->pins[i].config & IMX_NO_PAD_CTL)) {
+ for (i = j = 0; i < grp->num_pins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
+ if (!(pin->config & IMX_NO_PAD_CTL)) {
new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
new_map[j].data.configs.group_or_pin =
- pin_get_name(pctldev, grp->pins[i].pin);
- new_map[j].data.configs.configs = &grp->pins[i].config;
+ pin_get_name(pctldev, pin->pin);
+ new_map[j].data.configs.configs = &pin->config;
new_map[j].data.configs.num_configs = 1;
j++;
}
@@ -173,9 +144,9 @@ static void imx_dt_free_map(struct pinctrl_dev *pctldev,
}
static const struct pinctrl_ops imx_pctrl_ops = {
- .get_groups_count = imx_get_groups_count,
- .get_group_name = imx_get_group_name,
- .get_group_pins = imx_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.pin_dbg_show = imx_pin_dbg_show,
.dt_node_to_map = imx_dt_node_to_map,
.dt_free_map = imx_dt_free_map,
@@ -186,24 +157,33 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
unsigned int npins, pin_id;
int i;
- struct imx_pin_group *grp;
+ struct group_desc *grp = NULL;
+ struct function_desc *func = NULL;
/*
* Configure the mux mode for each pin in the group for a specific
* function.
*/
- grp = &info->groups[group];
- npins = grp->npins;
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ npins = grp->num_pins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- info->functions[selector].name, grp->name);
+ func->name, grp->name);
for (i = 0; i < npins; i++) {
- struct imx_pin *pin = &grp->pins[i];
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
pin_id = pin->pin;
pin_reg = &info->pin_regs[pin_id];
@@ -272,43 +252,13 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static int imx_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->nfunctions;
-}
-
-static const char *imx_pmx_get_func_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- return info->functions[selector].name;
-}
-
-static int imx_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
-
- *groups = info->functions[selector].groups;
- *num_groups = info->functions[selector].num_groups;
-
- return 0;
-}
-
static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
- struct imx_pin_group *grp;
+ struct group_desc *grp;
struct imx_pin *imx_pin;
unsigned int pin, group;
u32 reg;
@@ -322,10 +272,12 @@ static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
/* Find the pinctrl config with GPIO mux mode for the requested pin */
- for (group = 0; group < info->ngroups; group++) {
- grp = &info->groups[group];
- for (pin = 0; pin < grp->npins; pin++) {
- imx_pin = &grp->pins[pin];
+ for (group = 0; group < pctldev->num_groups; group++) {
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ continue;
+ for (pin = 0; pin < grp->num_pins; pin++) {
+ imx_pin = &((struct imx_pin *)(grp->data))[pin];
if (imx_pin->pin == offset && !imx_pin->mux_mode)
goto mux_pin;
}
@@ -346,7 +298,7 @@ static void imx_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
u32 reg;
@@ -371,7 +323,7 @@ static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset, bool input)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
u32 reg;
@@ -398,9 +350,9 @@ static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
}
static const struct pinmux_ops imx_pmx_ops = {
- .get_functions_count = imx_pmx_get_funcs_count,
- .get_function_name = imx_pmx_get_func_name,
- .get_function_groups = imx_pmx_get_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
.set_mux = imx_pmx_set,
.gpio_request_enable = imx_pmx_gpio_request_enable,
.gpio_disable_free = imx_pmx_gpio_disable_free,
@@ -411,7 +363,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin_id, unsigned long *config)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
if (pin_reg->conf_reg == -1) {
@@ -433,7 +385,7 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
unsigned num_configs)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
int i;
@@ -467,7 +419,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
unsigned long config;
@@ -483,20 +435,22 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned group)
{
- struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
- const struct imx_pinctrl_soc_info *info = ipctl->info;
- struct imx_pin_group *grp;
+ struct group_desc *grp;
unsigned long config;
const char *name;
int i, ret;
- if (group > info->ngroups)
+ if (group > pctldev->num_groups)
return;
seq_printf(s, "\n");
- grp = &info->groups[group];
- for (i = 0; i < grp->npins; i++) {
- struct imx_pin *pin = &grp->pins[i];
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return;
+
+ for (i = 0; i < grp->num_pins; i++) {
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
+
name = pin_get_name(pctldev, pin->pin);
ret = imx_pinconf_get(pctldev, pin->pin, &config);
if (ret)
@@ -520,7 +474,7 @@ static const struct pinconf_ops imx_pinconf_ops = {
#define SHARE_FSL_PIN_SIZE 20
static int imx_pinctrl_parse_groups(struct device_node *np,
- struct imx_pin_group *grp,
+ struct group_desc *grp,
struct imx_pinctrl_soc_info *info,
u32 index)
{
@@ -554,20 +508,20 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
return -EINVAL;
}
- grp->npins = size / pin_size;
- grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(struct imx_pin),
- GFP_KERNEL);
- grp->pin_ids = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
- GFP_KERNEL);
- if (!grp->pins || ! grp->pin_ids)
+ grp->num_pins = size / pin_size;
+ grp->data = devm_kzalloc(info->dev, grp->num_pins *
+ sizeof(struct imx_pin), GFP_KERNEL);
+ grp->pins = devm_kzalloc(info->dev, grp->num_pins *
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!grp->pins || !grp->data)
return -ENOMEM;
- for (i = 0; i < grp->npins; i++) {
+ for (i = 0; i < grp->num_pins; i++) {
u32 mux_reg = be32_to_cpu(*list++);
u32 conf_reg;
unsigned int pin_id;
struct imx_pin_reg *pin_reg;
- struct imx_pin *pin = &grp->pins[i];
+ struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
if (!(info->flags & ZERO_OFFSET_VALID) && !mux_reg)
mux_reg = -1;
@@ -583,7 +537,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin_id = (mux_reg != -1) ? mux_reg / 4 : conf_reg / 4;
pin_reg = &info->pin_regs[pin_id];
pin->pin = pin_id;
- grp->pin_ids[i] = pin_id;
+ grp->pins[i] = pin_id;
pin_reg->mux_reg = mux_reg;
pin_reg->conf_reg = conf_reg;
pin->input_reg = be32_to_cpu(*list++);
@@ -604,31 +558,46 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
}
static int imx_pinctrl_parse_functions(struct device_node *np,
- struct imx_pinctrl_soc_info *info,
+ struct imx_pinctrl *ipctl,
u32 index)
{
+ struct pinctrl_dev *pctl = ipctl->pctl;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
struct device_node *child;
- struct imx_pmx_func *func;
- struct imx_pin_group *grp;
+ struct function_desc *func;
+ struct group_desc *grp;
u32 i = 0;
dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
- func = &info->functions[index];
+ func = pinmux_generic_get_function(pctl, index);
+ if (!func)
+ return -EINVAL;
/* Initialise function */
func->name = np->name;
- func->num_groups = of_get_child_count(np);
- if (func->num_groups == 0) {
+ func->num_group_names = of_get_child_count(np);
+ if (func->num_group_names == 0) {
dev_err(info->dev, "no groups defined in %s\n", np->full_name);
return -EINVAL;
}
- func->groups = devm_kzalloc(info->dev,
- func->num_groups * sizeof(char *), GFP_KERNEL);
+ func->group_names = devm_kzalloc(info->dev,
+ func->num_group_names *
+ sizeof(char *), GFP_KERNEL);
for_each_child_of_node(np, child) {
- func->groups[i] = child->name;
- grp = &info->groups[info->group_index++];
+ func->group_names[i] = child->name;
+
+ grp = devm_kzalloc(info->dev, sizeof(struct group_desc),
+ GFP_KERNEL);
+ if (!grp)
+ return -ENOMEM;
+
+ mutex_lock(&info->mutex);
+ radix_tree_insert(&pctl->pin_group_tree,
+ info->group_index++, grp);
+ mutex_unlock(&info->mutex);
+
imx_pinctrl_parse_groups(child, grp, info, i++);
}
@@ -659,10 +628,12 @@ static bool imx_pinctrl_dt_is_flat_functions(struct device_node *np)
}
static int imx_pinctrl_probe_dt(struct platform_device *pdev,
- struct imx_pinctrl_soc_info *info)
+ struct imx_pinctrl *ipctl)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
+ struct pinctrl_dev *pctl = ipctl->pctl;
+ struct imx_pinctrl_soc_info *info = ipctl->info;
u32 nfuncs = 0;
u32 i = 0;
bool flat_funcs;
@@ -681,35 +652,50 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
}
}
- info->nfunctions = nfuncs;
- info->functions = devm_kzalloc(&pdev->dev, nfuncs * sizeof(struct imx_pmx_func),
+ for (i = 0; i < nfuncs; i++) {
+ struct function_desc *function;
+
+ function = devm_kzalloc(&pdev->dev, sizeof(*function),
GFP_KERNEL);
- if (!info->functions)
- return -ENOMEM;
+ if (!function)
+ return -ENOMEM;
+
+ mutex_lock(&info->mutex);
+ radix_tree_insert(&pctl->pin_function_tree, i, function);
+ mutex_unlock(&info->mutex);
+ }
+ pctl->num_functions = nfuncs;
info->group_index = 0;
if (flat_funcs) {
- info->ngroups = of_get_child_count(np);
+ pctl->num_groups = of_get_child_count(np);
} else {
- info->ngroups = 0;
+ pctl->num_groups = 0;
for_each_child_of_node(np, child)
- info->ngroups += of_get_child_count(child);
+ pctl->num_groups += of_get_child_count(child);
}
- info->groups = devm_kzalloc(&pdev->dev, info->ngroups * sizeof(struct imx_pin_group),
- GFP_KERNEL);
- if (!info->groups)
- return -ENOMEM;
if (flat_funcs) {
- imx_pinctrl_parse_functions(np, info, 0);
+ imx_pinctrl_parse_functions(np, ipctl, 0);
} else {
+ i = 0;
for_each_child_of_node(np, child)
- imx_pinctrl_parse_functions(child, info, i++);
+ imx_pinctrl_parse_functions(child, ipctl, i++);
}
return 0;
}
+/*
+ * imx_free_resources() - free the resources used by this driver
+ * @ipctl: imx pinctrl driver instance
+ */
+static void imx_free_resources(struct imx_pinctrl *ipctl)
+{
+ if (ipctl->pctl)
+ pinctrl_unregister(ipctl->pctl);
+}
+
int imx_pinctrl_probe(struct platform_device *pdev,
struct imx_pinctrl_soc_info *info)
{
@@ -783,23 +769,31 @@ int imx_pinctrl_probe(struct platform_device *pdev,
imx_pinctrl_desc->confops = &imx_pinconf_ops;
imx_pinctrl_desc->owner = THIS_MODULE;
- ret = imx_pinctrl_probe_dt(pdev, info);
- if (ret) {
- dev_err(&pdev->dev, "fail to probe dt properties\n");
- return ret;
- }
+ mutex_init(&info->mutex);
ipctl->info = info;
ipctl->dev = info->dev;
platform_set_drvdata(pdev, ipctl);
- ipctl->pctl = devm_pinctrl_register(&pdev->dev,
- imx_pinctrl_desc, ipctl);
- if (IS_ERR(ipctl->pctl)) {
+ ret = devm_pinctrl_register_and_init(&pdev->dev,
+ imx_pinctrl_desc, ipctl,
+ &ipctl->pctl);
+ if (ret) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
- return PTR_ERR(ipctl->pctl);
+ goto free;
+ }
+
+ ret = imx_pinctrl_probe_dt(pdev, ipctl);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to probe dt properties\n");
+ goto free;
}
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
return 0;
+
+free:
+ imx_free_resources(ipctl);
+
+ return ret;
}
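Not part of the patch, but to make the repeated "(struct imx_pin *)(grp->data)" casts above easier to follow: the i.MX driver now keeps its per-pin data as an array of struct imx_pin hanging off the generic group descriptor, which could be wrapped in a small helper like this sketch (assumes the driver's own pinctrl-imx.h and ../core.h are included):

	static inline struct imx_pin *imx_group_pin(struct group_desc *grp,
						    unsigned int index)
	{
		struct imx_pin *pins = grp->data;

		return &pins[index];
	}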
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 8af8aa2897ab..ff2d3e56b7c5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -18,7 +18,7 @@
struct platform_device;
/**
- * struct imx_pin_group - describes a single i.MX pin
+ * struct imx_pin - describes a single i.MX pin
* @pin: the pin_id of this pin
* @mux_mode: the mux mode for this pin.
* @input_reg: the select input register offset for this pin if any
@@ -35,33 +35,6 @@ struct imx_pin {
};
/**
- * struct imx_pin_group - describes an IMX pin group
- * @name: the name of this specific pin group
- * @npins: the number of pins in this group array, i.e. the number of
- * elements in .pins so we can iterate over that array
- * @pin_ids: array of pin_ids. pinctrl forces us to maintain such an array
- * @pins: array of pins
- */
-struct imx_pin_group {
- const char *name;
- unsigned npins;
- unsigned int *pin_ids;
- struct imx_pin *pins;
-};
-
-/**
- * struct imx_pmx_func - describes IMX pinmux functions
- * @name: the name of this specific function
- * @groups: corresponding pin groups
- * @num_groups: the number of groups
- */
-struct imx_pmx_func {
- const char *name;
- const char **groups;
- unsigned num_groups;
-};
-
-/**
* struct imx_pin_reg - describe a pin reg map
* @mux_reg: mux register offset
* @conf_reg: config register offset
@@ -76,13 +49,10 @@ struct imx_pinctrl_soc_info {
const struct pinctrl_pin_desc *pins;
unsigned int npins;
struct imx_pin_reg *pin_regs;
- struct imx_pin_group *groups;
- unsigned int ngroups;
unsigned int group_index;
- struct imx_pmx_func *functions;
- unsigned int nfunctions;
unsigned int flags;
const char *gpr_compatible;
+ struct mutex mutex;
};
#define SHARE_MUX_CONF_REG 0x1
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 37300634b7d2..7241435b90fa 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
enum pin_config_param param = pinconf_to_config_param(*config);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
unsigned long flags;
u32 conf, pull, val, debounce;
u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
return -EINVAL;
raw_spin_lock_irqsave(&vg->lock, flags);
- debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+ debounce = readl(db_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
unsigned int param, arg;
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
unsigned long flags;
u32 conf, val, debounce;
int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
- debounce = readl(byt_gpio_reg(vg, offset,
- BYT_DEBOUNCE_REG));
- conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+ debounce = readl(db_reg);
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
switch (arg) {
+ case 0:
+ conf &= ~BYT_DEBOUNCE_EN;
+ break;
case 375:
- conf |= BYT_DEBOUNCE_PULSE_375US;
+ debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
case 750:
- conf |= BYT_DEBOUNCE_PULSE_750US;
+ debounce |= BYT_DEBOUNCE_PULSE_750US;
break;
case 1500:
- conf |= BYT_DEBOUNCE_PULSE_1500US;
+ debounce |= BYT_DEBOUNCE_PULSE_1500US;
break;
case 3000:
- conf |= BYT_DEBOUNCE_PULSE_3MS;
+ debounce |= BYT_DEBOUNCE_PULSE_3MS;
break;
case 6000:
- conf |= BYT_DEBOUNCE_PULSE_6MS;
+ debounce |= BYT_DEBOUNCE_PULSE_6MS;
break;
case 12000:
- conf |= BYT_DEBOUNCE_PULSE_12MS;
+ debounce |= BYT_DEBOUNCE_PULSE_12MS;
break;
case 24000:
- conf |= BYT_DEBOUNCE_PULSE_24MS;
+ debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
ret = -EINVAL;
}
+ if (!ret)
+ writel(debounce, db_reg);
break;
default:
ret = -ENOTSUPP;
@@ -1449,7 +1455,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
val & BYT_LEVEL ? "hi" : "lo",
- comm->pad_map[i], comm->pad_map[i] * 32,
+ comm->pad_map[i], comm->pad_map[i] * 16,
conf0 & 0x7,
conf0 & BYT_TRIG_NEG ? " fall" : " ",
conf0 & BYT_TRIG_POS ? " rise" : " ",
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
{
+ struct gpio_chip *gc = &vg->chip;
+ struct device *dev = &vg->pdev->dev;
void __iomem *reg;
u32 base, value;
int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
}
value = readl(reg);
- if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
- !(value & BYT_DIRECT_IRQ_EN)) {
+ if (value & BYT_DIRECT_IRQ_EN) {
+ clear_bit(i, gc->irq_valid_mask);
+ dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+ } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
- dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+ dev_dbg(dev, "disabling GPIO %d\n", i);
}
}
@@ -1680,12 +1690,13 @@ static int byt_gpio_probe(struct byt_gpio *vg)
gc->can_sleep = false;
gc->parent = &vg->pdev->dev;
gc->ngpio = vg->soc_data->npins;
+ gc->irq_need_valid_mask = true;
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
sizeof(*vg->saved_context), GFP_KERNEL);
#endif
- ret = gpiochip_add_data(gc, vg);
+ ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg);
if (ret) {
dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
return ret;
@@ -1695,7 +1706,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
0, 0, vg->soc_data->npins);
if (ret) {
dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n");
- goto fail;
+ return ret;
}
/* set up interrupts */
@@ -1706,7 +1717,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&vg->pdev->dev, "failed to add irqchip\n");
- goto fail;
+ return ret;
}
gpiochip_set_chained_irqchip(gc, &byt_irqchip,
@@ -1715,11 +1726,6 @@ static int byt_gpio_probe(struct byt_gpio *vg)
}
return ret;
-
-fail:
- gpiochip_remove(&vg->chip);
-
- return ret;
}
static int byt_set_soc_data(struct byt_gpio *vg,
@@ -1802,7 +1808,7 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
vg->pctl_desc.pins = vg->soc_data->pins;
vg->pctl_desc.npins = vg->soc_data->npins;
- vg->pctl_dev = pinctrl_register(&vg->pctl_desc, &pdev->dev, vg);
+ vg->pctl_dev = devm_pinctrl_register(&pdev->dev, &vg->pctl_desc, vg);
if (IS_ERR(vg->pctl_dev)) {
dev_err(&pdev->dev, "failed to register pinctrl driver\n");
return PTR_ERR(vg->pctl_dev);
@@ -1811,10 +1817,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
raw_spin_lock_init(&vg->lock);
ret = byt_gpio_probe(vg);
- if (ret) {
- pinctrl_unregister(vg->pctl_dev);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, vg);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 59cb7a6fc5be..e6e6fd112585 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -19,7 +19,7 @@
#define BXT_PAD_OWN 0x020
#define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
#define BXT_GPI_IE 0x110
#define BXT_COMMUNITY(s, e) \
@@ -1004,8 +1004,8 @@ static const struct acpi_device_id bxt_pinctrl_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, bxt_pinctrl_acpi_match);
static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
- { "apl-pinctrl", (kernel_ulong_t)&apl_pinctrl_soc_data },
- { "broxton-pinctrl", (kernel_ulong_t)&bxt_pinctrl_soc_data },
+ { "apollolake-pinctrl", (kernel_ulong_t)apl_pinctrl_soc_data },
+ { "broxton-pinctrl", (kernel_ulong_t)bxt_pinctrl_soc_data },
{ },
};
@@ -1058,7 +1058,6 @@ static const struct dev_pm_ops bxt_pinctrl_pm_ops = {
static struct platform_driver bxt_pinctrl_driver = {
.probe = bxt_pinctrl_probe,
- .remove = intel_pinctrl_remove,
.driver = {
.name = "broxton-pinctrl",
.acpi_match_table = bxt_pinctrl_acpi_match,
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 5e66860a5e67..f80134e3e0b6 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1059,7 +1059,7 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
}
static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
- enum pin_config_param param, u16 arg)
+ enum pin_config_param param, u32 arg)
{
void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
unsigned long flags;
@@ -1151,7 +1151,7 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
int i, ret;
- u16 arg;
+ u32 arg;
if (chv_pad_locked(pctrl, pin))
return -EBUSY;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e139672f1af..377a3f090dd1 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
return 0;
}
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+ u32 value;
+
+ value = readl(padcfg0);
+ if (input) {
+ value &= ~PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
+ } else {
+ value &= ~PADCFG0_GPIOTXDIS;
+ value |= PADCFG0_GPIORXDIS;
+ }
+ writel(value, padcfg0);
+}
+
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
- /* Disable TX buffer and enable RX (this will be input) */
- value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
+ /* Disable TX buffer and enable RX (this will be input) */
+ __intel_gpio_set_direction(padcfg0, true);
+
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
- u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
- value = readl(padcfg0);
- if (input)
- value |= PADCFG0_GPIOTXDIS;
- else
- value &= ~PADCFG0_GPIOTXDIS;
- writel(value, padcfg0);
+ __intel_gpio_set_direction(padcfg0, input);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -884,7 +892,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip.base = -1;
pctrl->irq = irq;
- ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
return ret;
@@ -894,7 +902,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
0, 0, pctrl->soc->npins);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
- goto fail;
+ return ret;
}
/*
@@ -907,24 +915,19 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
dev_name(pctrl->dev), pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to request interrupt\n");
- goto fail;
+ return ret;
}
ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add irqchip\n");
- goto fail;
+ return ret;
}
gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
NULL);
return 0;
-
-fail:
- gpiochip_remove(&pctrl->chip);
-
- return ret;
}
static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
@@ -1046,16 +1049,6 @@ int intel_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(intel_pinctrl_probe);
-int intel_pinctrl_remove(struct platform_device *pdev)
-{
- struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pctrl->chip);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
-
#ifdef CONFIG_PM_SLEEP
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
{
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index b60215793017..c22c44485c5d 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -121,8 +121,6 @@ struct intel_pinctrl_soc_data {
int intel_pinctrl_probe(struct platform_device *pdev,
const struct intel_pinctrl_soc_data *soc_data);
-int intel_pinctrl_remove(struct platform_device *pdev);
-
#ifdef CONFIG_PM_SLEEP
int intel_pinctrl_suspend(struct device *dev);
int intel_pinctrl_resume(struct device *dev);
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index c725a5313b4e..9877526c0807 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -574,7 +574,6 @@ static const struct dev_pm_ops spt_pinctrl_pm_ops = {
static struct platform_driver spt_pinctrl_driver = {
.probe = spt_pinctrl_probe,
- .remove = intel_pinctrl_remove,
.driver = {
.name = "sunrisepoint-pinctrl",
.acpi_match_table = spt_pinctrl_acpi_match,
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 4f0bc8a103f4..80fe3b48796c 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -10,25 +10,29 @@ config PINCTRL_MTK
# For ARMv7 SoCs
config PINCTRL_MT2701
- bool "Mediatek MT2701 pin control" if COMPILE_TEST && !MACH_MT2701
+ bool "Mediatek MT2701 pin control"
+ depends on MACH_MT2701 || COMPILE_TEST
depends on OF
default MACH_MT2701
select PINCTRL_MTK
config PINCTRL_MT7623
- bool "Mediatek MT7623 pin control" if COMPILE_TEST && !MACH_MT7623
+ bool "Mediatek MT7623 pin control"
+ depends on MACH_MT7623 || COMPILE_TEST
depends on OF
default MACH_MT7623
select PINCTRL_MTK_COMMON
config PINCTRL_MT8135
- bool "Mediatek MT8135 pin control" if COMPILE_TEST && !MACH_MT8135
+ bool "Mediatek MT8135 pin control"
+ depends on MACH_MT8135 || COMPILE_TEST
depends on OF
default MACH_MT8135
select PINCTRL_MTK
config PINCTRL_MT8127
- bool "Mediatek MT8127 pin control" if COMPILE_TEST && !MACH_MT8127
+ bool "Mediatek MT8127 pin control"
+ depends on MACH_MT8127 || COMPILE_TEST
depends on OF
default MACH_MT8127
select PINCTRL_MTK
@@ -43,7 +47,8 @@ config PINCTRL_MT8173
# For PMIC
config PINCTRL_MT6397
- bool "Mediatek MT6397 pin control" if COMPILE_TEST && !MFD_MT6397
+ bool "Mediatek MT6397 pin control"
+ depends on MFD_MT6397 || COMPILE_TEST
depends on OF
default MFD_MT6397
select PINCTRL_MTK
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
index 67895f8234e3..fa28dd6b871b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index f9aef2ac03a1..3cf384f8b122 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1054,6 +1054,18 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
return 0;
}
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return mtk_gpio_set_debounce(chip, offset, debounce);
+}
+
static const struct gpio_chip mtk_gpio_chip = {
.owner = THIS_MODULE,
.request = gpiochip_generic_request,
@@ -1064,7 +1076,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.get = mtk_gpio_get,
.set = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
- .set_debounce = mtk_gpio_set_debounce,
+ .set_config = mtk_gpio_set_config,
.of_gpio_n_cells = 2,
};
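For context on the .set_debounce to .set_config conversion above, a hedged sketch (foo_* is hypothetical) of how a caller packs a debounce period into the generic pin config value that mtk_gpio_set_config() unpacks:

	#include <linux/gpio/driver.h>
	#include <linux/pinctrl/pinconf-generic.h>

	static int foo_set_debounce(struct gpio_chip *chip, unsigned int offset,
				    unsigned int debounce_us)
	{
		unsigned long config;

		/* Same packing gpiolib performs before calling .set_config */
		config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
						  debounce_us);
		return chip->set_config(chip, offset, config);
	}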
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
index 3472a76ad422..e06cfc40da0f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa3fefa..7671424d46cb 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -232,6 +232,10 @@ static const unsigned int pwm_e_pins[] = { PIN(GPIOX_19, EE_OFF) };
static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
static const unsigned int pwm_f_y_pins[] = { PIN(GPIOY_15, EE_OFF) };
+static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
+static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
+static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
+
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -253,9 +257,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
- PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
@@ -440,6 +443,11 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(eth_txd2, 6, 3),
GROUP(eth_txd3, 6, 2),
+ /* Bank H */
+ GROUP(hdmi_hpd, 1, 26),
+ GROUP(hdmi_sda, 1, 25),
+ GROUP(hdmi_scl, 1, 24),
+
/* Bank DV */
GROUP(uart_tx_b, 2, 29),
GROUP(uart_rx_b, 2, 28),
@@ -498,7 +506,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GPIO_GROUP(GPIOAO_13, 0),
/* bank AO */
- GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_tx_ao_b, 0, 24),
GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
@@ -636,6 +644,14 @@ static const char * const pwm_f_y_groups[] = {
"pwm_f_y",
};
+static const char * const hdmi_hpd_groups[] = {
+ "hdmi_hpd",
+};
+
+static const char * const hdmi_i2c_groups[] = {
+ "hdmi_sda", "hdmi_scl",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -699,6 +715,8 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(pwm_e),
FUNCTION(pwm_f_x),
FUNCTION(pwm_f_y),
+ FUNCTION(hdmi_hpd),
+ FUNCTION(hdmi_i2c),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 25694f7094c7..4ab94a85e306 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -197,6 +197,10 @@ static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) };
+static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
+static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
+static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
+
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -214,14 +218,15 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
- PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
+static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_9, 0) };
+
static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0, EE_OFF),
GPIO_GROUP(GPIOZ_1, EE_OFF),
@@ -363,6 +368,11 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(eth_txd2, 4, 11),
GROUP(eth_txd3, 4, 10),
+ /* Bank H */
+ GROUP(hdmi_hpd, 6, 31),
+ GROUP(hdmi_sda, 6, 30),
+ GROUP(hdmi_scl, 6, 29),
+
/* Bank DV */
GROUP(uart_tx_b, 2, 16),
GROUP(uart_rx_b, 2, 15),
@@ -409,7 +419,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GPIO_GROUP(GPIOAO_9, 0),
/* bank AO */
- GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_tx_ao_b, 0, 24),
GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
@@ -418,6 +428,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GROUP(uart_cts_ao_b, 0, 8),
GROUP(uart_rts_ao_b, 0, 7),
GROUP(remote_input_ao, 0, 0),
+ GROUP(pwm_ao_b, 0, 3),
};
static const char * const gpio_periphs_groups[] = {
@@ -506,6 +517,14 @@ static const char * const pwm_e_groups[] = {
"pwm_e",
};
+static const char * const hdmi_hpd_groups[] = {
+ "hdmi_hpd",
+};
+
+static const char * const hdmi_i2c_groups[] = {
+ "hdmi_sda", "hdmi_scl",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -523,6 +542,10 @@ static const char * const remote_input_ao_groups[] = {
"remote_input_ao",
};
+static const char * const pwm_ao_b_groups[] = {
+ "pwm_ao_b",
+};
+
static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
@@ -537,6 +560,8 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(i2c_c),
FUNCTION(eth),
FUNCTION(pwm_e),
+ FUNCTION(hdmi_hpd),
+ FUNCTION(hdmi_i2c),
};
static struct meson_pmx_func meson_gxl_aobus_functions[] = {
@@ -544,6 +569,7 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
FUNCTION(remote_input_ao),
+ FUNCTION(pwm_ao_b),
};
static struct meson_bank meson_gxl_periphs_banks[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index a579126832af..cf1686e04378 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -212,7 +212,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
+ meson_pmx_disable_other_groups(pc, offset, -1);
return 0;
}
@@ -260,7 +260,6 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
enum pin_config_param param;
unsigned int reg, bit;
int i, ret;
- u16 arg;
ret = meson_get_bank(pc, pin, &bank);
if (ret)
@@ -268,7 +267,6 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 9cc1cc3f5c34..c2de4f8ee488 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -23,18 +23,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_370_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_370_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
@@ -384,8 +372,8 @@ static const struct of_device_id armada_370_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 65, NULL, armada_370_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 65, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
@@ -397,12 +385,6 @@ static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
soc->variant = 0; /* no variants for Armada 370 */
soc->controls = mv88f6710_mpp_controls;
@@ -414,7 +396,7 @@ static int armada_370_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_370_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
index 070651431ca4..30cbf23b0b03 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -23,18 +23,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_375_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_375_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
MPP_MODE(0,
MPP_FUNCTION(0x0, "gpio", NULL),
@@ -402,8 +390,8 @@ static const struct of_device_id armada_375_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 69, NULL, armada_375_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 69, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
@@ -415,12 +403,6 @@ static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
static int armada_375_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
soc->variant = 0; /* no variants for Armada 375 */
soc->controls = mv88f6720_mpp_controls;
@@ -432,7 +414,7 @@ static int armada_375_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_375_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 4e84c8e4938c..e66ed239522e 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -22,18 +22,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_38x_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_38x_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
enum {
V_88F6810 = BIT(0),
V_88F6820 = BIT(1),
@@ -409,8 +397,8 @@ static const struct of_device_id armada_38x_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 59, NULL, armada_38x_mpp_ctrl),
+static const struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
@@ -423,16 +411,10 @@ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
- struct resource *res;
if (!match)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
-
soc->variant = (unsigned) match->data & 0xff;
soc->controls = armada_38x_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
@@ -443,7 +425,7 @@ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_38x_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
index e288f8ba0bf1..697c8774a4da 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
@@ -22,18 +22,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int armada_39x_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_39x_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
enum {
V_88F6920 = BIT(0),
V_88F6925 = BIT(1),
@@ -391,8 +379,8 @@ static const struct of_device_id armada_39x_pinctrl_of_match[] = {
{ },
};
-static struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 59, NULL, armada_39x_mpp_ctrl),
+static const struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 59, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
@@ -405,16 +393,10 @@ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_39x_pinctrl_of_match, &pdev->dev);
- struct resource *res;
if (!match)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
-
soc->variant = (unsigned) match->data & 0xff;
soc->controls = armada_39x_mpp_controls;
soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls);
@@ -425,7 +407,7 @@ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_39x_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index e4ea71a9d985..61cbc138703e 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -30,25 +30,18 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
static u32 *mpp_saved_regs;
-static int armada_xp_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int armada_xp_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
enum armada_xp_variant {
V_MV78230 = BIT(0),
V_MV78260 = BIT(1),
V_MV78460 = BIT(2),
V_MV78230_PLUS = (V_MV78230 | V_MV78260 | V_MV78460),
V_MV78260_PLUS = (V_MV78260 | V_MV78460),
+ V_98DX3236 = BIT(3),
+ V_98DX3336 = BIT(4),
+ V_98DX4251 = BIT(5),
+ V_98DX3236_PLUS = (V_98DX3236 | V_98DX3336 | V_98DX4251),
};
static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
@@ -360,6 +353,131 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
MPP_VAR_FUNCTION(0x1, "dev", "ad31", V_MV78260_PLUS)),
};
+static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "mosi", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad8", V_98DX3236_PLUS)),
+ MPP_MODE(1,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "miso", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad9", V_98DX3236_PLUS)),
+ MPP_MODE(2,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "sck", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad10", V_98DX3236_PLUS)),
+ MPP_MODE(3,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "cs0", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad11", V_98DX3236_PLUS)),
+ MPP_MODE(4,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "spi0", "cs1", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "smi", "mdc", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "cs0", V_98DX3236_PLUS)),
+ MPP_MODE(5,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "pex", "rsto", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "cmd", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "bootcs", V_98DX3236_PLUS)),
+ MPP_MODE(6,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "clk", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "a2", V_98DX3236_PLUS)),
+ MPP_MODE(7,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d0", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ale0", V_98DX3236_PLUS)),
+ MPP_MODE(8,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d1", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ale1", V_98DX3236_PLUS)),
+ MPP_MODE(9,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d2", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ready0", V_98DX3236_PLUS)),
+ MPP_MODE(10,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "sd0", "d3", V_98DX4251),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad12", V_98DX3236_PLUS)),
+ MPP_MODE(11,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart1", "rxd", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart0", "cts", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad13", V_98DX3236_PLUS)),
+ MPP_MODE(12,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x2, "uart1", "txd", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart0", "rts", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad14", V_98DX3236_PLUS)),
+ MPP_MODE(13,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "intr", "out", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad15", V_98DX3236_PLUS)),
+ MPP_MODE(14,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)),
+ MPP_MODE(15,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)),
+ MPP_MODE(16,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)),
+ MPP_MODE(17,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "clkout", V_98DX3236_PLUS)),
+ MPP_MODE(18,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart1", "txd", V_98DX3236_PLUS)),
+ MPP_MODE(19,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "rb", V_98DX3236_PLUS)),
+ MPP_MODE(20,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "we0", V_98DX3236_PLUS)),
+ MPP_MODE(21,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad0", V_98DX3236_PLUS)),
+ MPP_MODE(22,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad1", V_98DX3236_PLUS)),
+ MPP_MODE(23,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad2", V_98DX3236_PLUS)),
+ MPP_MODE(24,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad3", V_98DX3236_PLUS)),
+ MPP_MODE(25,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad4", V_98DX3236_PLUS)),
+ MPP_MODE(26,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad5", V_98DX3236_PLUS)),
+ MPP_MODE(27,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad6", V_98DX3236_PLUS)),
+ MPP_MODE(28,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "ad7", V_98DX3236_PLUS)),
+ MPP_MODE(29,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "a0", V_98DX3236_PLUS)),
+ MPP_MODE(30,
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "dev", "a1", V_98DX3236_PLUS)),
+ MPP_MODE(31,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "slv_smi", "mdc", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "smi", "mdc", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "we1", V_98DX3236_PLUS)),
+ MPP_MODE(32,
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x1, "slv_smi", "mdio", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x3, "smi", "mdio", V_98DX3236_PLUS),
+ MPP_VAR_FUNCTION(0x4, "dev", "cs1", V_98DX3236_PLUS)),
+};
+
static struct mvebu_pinctrl_soc_info armada_xp_pinctrl_info;
static const struct of_device_id armada_xp_pinctrl_of_match[] = {
@@ -375,11 +493,19 @@ static const struct of_device_id armada_xp_pinctrl_of_match[] = {
.compatible = "marvell,mv78460-pinctrl",
.data = (void *) V_MV78460,
},
+ {
+ .compatible = "marvell,98dx3236-pinctrl",
+ .data = (void *) V_98DX3236,
+ },
+ {
+ .compatible = "marvell,98dx4251-pinctrl",
+ .data = (void *) V_98DX4251,
+ },
{ },
};
-static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 48, NULL, armada_xp_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 48, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
@@ -387,8 +513,8 @@ static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(1, 32, 32, 17),
};
-static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 66, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
@@ -397,8 +523,8 @@ static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 3),
};
-static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 66, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
@@ -407,6 +533,14 @@ static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
MPP_GPIO_RANGE(2, 64, 64, 3),
};
+static struct mvebu_mpp_ctrl mv98dx3236_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 32, NULL, mvebu_mmio_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range mv98dx3236_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 32),
+};
+
static int armada_xp_pinctrl_suspend(struct platform_device *pdev,
pm_message_t state)
{
@@ -417,7 +551,7 @@ static int armada_xp_pinctrl_suspend(struct platform_device *pdev,
nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
for (i = 0; i < nregs; i++)
- mpp_saved_regs[i] = readl(mpp_base + i * 4);
+ mpp_saved_regs[i] = readl(soc->control_data[0].base + i * 4);
return 0;
}
@@ -431,7 +565,7 @@ static int armada_xp_pinctrl_resume(struct platform_device *pdev)
nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
for (i = 0; i < nregs; i++)
- writel(mpp_saved_regs[i], mpp_base + i * 4);
+ writel(mpp_saved_regs[i], soc->control_data[0].base + i * 4);
return 0;
}
@@ -441,17 +575,11 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
const struct of_device_id *match =
of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
- struct resource *res;
int nregs;
if (!match)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
-
soc->variant = (unsigned) match->data & 0xff;
switch (soc->variant) {
@@ -488,6 +616,17 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
soc->gpioranges = mv78460_mpp_gpio_ranges;
soc->ngpioranges = ARRAY_SIZE(mv78460_mpp_gpio_ranges);
break;
+ case V_98DX3236:
+ case V_98DX3336:
+ case V_98DX4251:
+ /* fall-through */
+ soc->controls = mv98dx3236_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(mv98dx3236_mpp_controls);
+ soc->modes = mv98dx3236_mpp_modes;
+ soc->nmodes = mv98dx3236_mpp_controls[0].npins;
+ soc->gpioranges = mv98dx3236_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(mv98dx3236_mpp_gpio_ranges);
+ break;
}
nregs = DIV_ROUND_UP(soc->nmodes, MVEBU_MPPS_PER_REG);
@@ -499,7 +638,7 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
pdev->dev.platform_data = soc;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver armada_xp_pinctrl_driver = {
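[Illustrative note] The Armada XP hunks above replace the driver-private mpp_base accessors with the shared mvebu_mmio_mpp_ctrl helpers and route probing through mvebu_pinctrl_simple_mmio_probe(). As an illustration only (not part of the patch), a minimal mvebu SoC driver built on that interface reduces to roughly the sketch below; the "foo" names are hypothetical and the mode/gpio-range tables are omitted for brevity.

#include <linux/kernel.h>
#include <linux/platform_device.h>

#include "pinctrl-mvebu.h"

/* hypothetical: one MPP control, handled entirely by the shared MMIO ops */
static const struct mvebu_mpp_ctrl foo_mpp_controls[] = {
        MPP_FUNC_CTRL(0, 31, NULL, mvebu_mmio_mpp_ctrl),
};

static struct mvebu_pinctrl_soc_info foo_pinctrl_info;

static int foo_pinctrl_probe(struct platform_device *pdev)
{
        struct mvebu_pinctrl_soc_info *soc = &foo_pinctrl_info;

        soc->controls = foo_mpp_controls;
        soc->ncontrols = ARRAY_SIZE(foo_mpp_controls);
        /* soc->modes, soc->nmodes and soc->gpioranges omitted here */

        pdev->dev.platform_data = soc;

        /* ioremaps the first MEM resource and shares it across all controls */
        return mvebu_pinctrl_simple_mmio_probe(pdev);
}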
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index f93ae0dcef9c..89ae93c49f2f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -61,30 +61,20 @@
#define CONFIG_PMU BIT(4)
-static void __iomem *mpp_base;
static void __iomem *mpp4_base;
static void __iomem *pmu_base;
static struct regmap *gconfmap;
-static int dove_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int dove_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
-static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long *config)
{
unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
if ((pmu & BIT(pid)) == 0)
- return default_mpp_ctrl_get(mpp_base, pid, config);
+ return mvebu_mmio_mpp_ctrl_get(data, pid, config);
func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
*config = (func >> shift) & MVEBU_MPP_MASK;
@@ -93,19 +83,20 @@ static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
+static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long config)
{
unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
unsigned long func;
if ((config & CONFIG_PMU) == 0) {
- writel(pmu & ~BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
- return default_mpp_ctrl_set(mpp_base, pid, config);
+ writel(pmu & ~BIT(pid), data->base + PMU_MPP_GENERAL_CTRL);
+ return mvebu_mmio_mpp_ctrl_set(data, pid, config);
}
- writel(pmu | BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+ writel(pmu | BIT(pid), data->base + PMU_MPP_GENERAL_CTRL);
func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
func &= ~(MVEBU_MPP_MASK << shift);
func |= (config & MVEBU_MPP_MASK) << shift;
@@ -114,7 +105,8 @@ static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
@@ -144,7 +136,8 @@ static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
+static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
unsigned long mpp4 = readl(mpp4_base);
unsigned long mask;
@@ -178,7 +171,8 @@ static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned int gmpp;
@@ -188,7 +182,8 @@ static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
+static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
NAND_GPIO_EN,
@@ -196,28 +191,31 @@ static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static int dove_audio0_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
*config = ((pmu & AU0_AC97_SEL) != 0);
return 0;
}
-static int dove_audio0_ctrl_set(unsigned pid, unsigned long config)
+static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
- unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
+ unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
pmu &= ~AU0_AC97_SEL;
if (config)
pmu |= AU0_AC97_SEL;
- writel(pmu, mpp_base + PMU_MPP_GENERAL_CTRL);
+ writel(pmu, data->base + PMU_MPP_GENERAL_CTRL);
return 0;
}
-static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned int mpp4 = readl(mpp4_base);
unsigned int sspc1;
@@ -247,7 +245,8 @@ static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
+static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
unsigned int mpp4 = readl(mpp4_base);
@@ -274,11 +273,12 @@ static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
* break other functions. If you require all mpps as gpio
* enforce gpio setting by pinctrl mapping.
*/
-static int dove_audio1_ctrl_gpio_req(unsigned pid)
+static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid)
{
unsigned long config;
- dove_audio1_ctrl_get(pid, &config);
+ dove_audio1_ctrl_get(data, pid, &config);
switch (config) {
case 0x02: /* i2s1 : gpio[56:57] */
@@ -301,14 +301,16 @@ static int dove_audio1_ctrl_gpio_req(unsigned pid)
}
/* mpp[52:57] has gpio pins capable of in and out */
-static int dove_audio1_ctrl_gpio_dir(unsigned pid, bool input)
+static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, bool input)
{
if (pid < 52 || pid > 57)
return -ENOTSUPP;
return 0;
}
-static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
+static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config)
{
unsigned int gcfg1;
unsigned int gcfg2;
@@ -327,7 +329,8 @@ static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
+static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config)
{
unsigned int gcfg1 = 0;
unsigned int gcfg2 = 0;
@@ -354,9 +357,9 @@ static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-static struct mvebu_mpp_ctrl dove_mpp_controls[] = {
+static const struct mvebu_mpp_ctrl dove_mpp_controls[] = {
MPP_FUNC_CTRL(0, 15, NULL, dove_pmu_mpp_ctrl),
- MPP_FUNC_CTRL(16, 23, NULL, dove_mpp_ctrl),
+ MPP_FUNC_CTRL(16, 23, NULL, mvebu_mmio_mpp_ctrl),
MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl),
MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl),
MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl),
@@ -769,6 +772,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
struct resource fb_res;
const struct of_device_id *match =
of_match_device(dove_pinctrl_of_match, &pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ void __iomem *base;
+ int i;
+
pdev->dev.platform_data = (void *)match->data;
/*
@@ -783,9 +790,18 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
mpp_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, mpp_res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
+ base = devm_ioremap_resource(&pdev->dev, mpp_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols,
+ sizeof(*mpp_data), GFP_KERNEL);
+ if (!mpp_data)
+ return -ENOMEM;
+
+ dove_pinctrl_info.control_data = mpp_data;
+ for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++)
+ mpp_data[i].base = base;
/* prepare fallback resource */
memcpy(&fb_res, mpp_res, sizeof(struct resource));
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 5f89c26f3292..be12b6d569a0 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -21,18 +21,6 @@
#include "pinctrl-mvebu.h"
-static void __iomem *mpp_base;
-
-static int kirkwood_mpp_ctrl_get(unsigned pid, unsigned long *config)
-{
- return default_mpp_ctrl_get(mpp_base, pid, config);
-}
-
-static int kirkwood_mpp_ctrl_set(unsigned pid, unsigned long config)
-{
- return default_mpp_ctrl_set(mpp_base, pid, config);
-}
-
#define V(f6180, f6190, f6192, f6281, f6282, dx4122) \
((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
(f6281 << 3) | (f6282 << 4) | (dx4122 << 5))
@@ -370,8 +358,8 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
MPP_VAR_FUNCTION(0xb, "lcd", "d17", V(0, 0, 0, 0, 1, 0))),
};
-static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 44, NULL, kirkwood_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 44, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
@@ -379,8 +367,8 @@ static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
MPP_GPIO_RANGE(1, 35, 35, 10),
};
-static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 35, NULL, kirkwood_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 35, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
@@ -388,8 +376,8 @@ static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
MPP_GPIO_RANGE(1, 32, 32, 4),
};
-static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 49, NULL, kirkwood_mpp_ctrl),
+static const struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 49, NULL, mvebu_mmio_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = {
@@ -469,17 +457,12 @@ static const struct of_device_id kirkwood_pinctrl_of_match[] = {
static int kirkwood_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
const struct of_device_id *match =
of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
- pdev->dev.platform_data = (void *)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp_base))
- return PTR_ERR(mpp_base);
+ pdev->dev.platform_data = (void *)match->data;
- return mvebu_pinctrl_probe(pdev);
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
}
static struct platform_driver kirkwood_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index b6ec6db78351..1bdfe770eb5c 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -23,6 +23,8 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pinctrl-mvebu.h"
@@ -38,7 +40,8 @@ struct mvebu_pinctrl_function {
struct mvebu_pinctrl_group {
const char *name;
- struct mvebu_mpp_ctrl *ctrl;
+ const struct mvebu_mpp_ctrl *ctrl;
+ struct mvebu_mpp_ctrl_data *data;
struct mvebu_mpp_ctrl_setting *settings;
unsigned num_settings;
unsigned gid;
@@ -57,6 +60,30 @@ struct mvebu_pinctrl {
u8 variant;
};
+int mvebu_mmio_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+ *config = (readl(data->base + off) >> shift) & MVEBU_MPP_MASK;
+
+ return 0;
+}
+
+int mvebu_mmio_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned long reg;
+
+ reg = readl(data->base + off) & ~(MVEBU_MPP_MASK << shift);
+ writel(reg | (config << shift), data->base + off);
+
+ return 0;
+}
+
static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_pid(
struct mvebu_pinctrl *pctl, unsigned pid)
{
@@ -146,7 +173,7 @@ static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
if (!grp->ctrl)
return -EINVAL;
- return grp->ctrl->mpp_get(grp->pins[0], config);
+ return grp->ctrl->mpp_get(grp->data, grp->pins[0], config);
}
static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -161,7 +188,7 @@ static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
for (i = 0; i < num_configs; i++) {
- ret = grp->ctrl->mpp_set(grp->pins[0], configs[i]);
+ ret = grp->ctrl->mpp_set(grp->data, grp->pins[0], configs[i]);
if (ret)
return ret;
} /* for each config */
@@ -188,18 +215,19 @@ static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
if (curr->subname)
seq_printf(s, "(%s)", curr->subname);
if (curr->flags & (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
- seq_printf(s, "(");
+ seq_putc(s, '(');
if (curr->flags & MVEBU_SETTING_GPI)
- seq_printf(s, "i");
+ seq_putc(s, 'i');
if (curr->flags & MVEBU_SETTING_GPO)
- seq_printf(s, "o");
- seq_printf(s, ")");
+ seq_putc(s, 'o');
+ seq_putc(s, ')');
}
- } else
- seq_printf(s, "current: UNKNOWN");
+ } else {
+ seq_puts(s, "current: UNKNOWN");
+ }
if (grp->num_settings > 1) {
- seq_printf(s, ", available = [");
+ seq_puts(s, ", available = [");
for (n = 0; n < grp->num_settings; n++) {
if (curr == &grp->settings[n])
continue;
@@ -214,17 +242,16 @@ static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "(%s)", grp->settings[n].subname);
if (grp->settings[n].flags &
(MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) {
- seq_printf(s, "(");
+ seq_putc(s, '(');
if (grp->settings[n].flags & MVEBU_SETTING_GPI)
- seq_printf(s, "i");
+ seq_putc(s, 'i');
if (grp->settings[n].flags & MVEBU_SETTING_GPO)
- seq_printf(s, "o");
- seq_printf(s, ")");
+ seq_putc(s, 'o');
+ seq_putc(s, ')');
}
}
- seq_printf(s, " ]");
+ seq_puts(s, " ]");
}
- return;
}
static const struct pinconf_ops mvebu_pinconf_ops = {
@@ -302,7 +329,7 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_req)
- return grp->ctrl->mpp_gpio_req(offset);
+ return grp->ctrl->mpp_gpio_req(grp->data, offset);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -325,7 +352,7 @@ static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
return -EINVAL;
if (grp->ctrl->mpp_gpio_dir)
- return grp->ctrl->mpp_gpio_dir(offset, input);
+ return grp->ctrl->mpp_gpio_dir(grp->data, offset, input);
setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
if (!setting)
@@ -398,13 +425,9 @@ static int mvebu_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
}
- *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
- if (*map == NULL) {
- dev_err(pctl->dev,
- "cannot allocate pinctrl_map memory for %s\n",
- np->name);
+ *map = kmalloc_array(nmaps, sizeof(**map), GFP_KERNEL);
+ if (!*map)
return -ENOMEM;
- }
n = 0;
of_property_for_each_string(np, "marvell,pins", prop, group) {
@@ -563,10 +586,8 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
GFP_KERNEL);
- if (!pctl) {
- dev_err(&pdev->dev, "unable to alloc driver\n");
+ if (!pctl)
return -ENOMEM;
- }
pctl->desc.name = dev_name(&pdev->dev);
pctl->desc.owner = THIS_MODULE;
@@ -582,7 +603,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pctl->num_groups = 0;
pctl->desc.npins = 0;
for (n = 0; n < soc->ncontrols; n++) {
- struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ const struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
pctl->desc.npins += ctrl->npins;
/* initialize control's pins[] array */
@@ -604,10 +625,8 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins *
sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
- if (!pdesc) {
- dev_err(&pdev->dev, "failed to alloc pinctrl pins\n");
+ if (!pdesc)
return -ENOMEM;
- }
for (n = 0; n < pctl->desc.npins; n++)
pdesc[n].number = n;
@@ -628,9 +647,13 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
/* assign mpp controls to groups */
gid = 0;
for (n = 0; n < soc->ncontrols; n++) {
- struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ const struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
+ struct mvebu_mpp_ctrl_data *data = soc->control_data ?
+ &soc->control_data[n] : NULL;
+
pctl->groups[gid].gid = gid;
pctl->groups[gid].ctrl = ctrl;
+ pctl->groups[gid].data = data;
pctl->groups[gid].name = ctrl->name;
pctl->groups[gid].pins = ctrl->pins;
pctl->groups[gid].npins = ctrl->npins;
@@ -650,6 +673,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
gid++;
pctl->groups[gid].gid = gid;
pctl->groups[gid].ctrl = ctrl;
+ pctl->groups[gid].data = data;
pctl->groups[gid].name = noname_buf;
pctl->groups[gid].pins = &ctrl->pins[k];
pctl->groups[gid].npins = 1;
@@ -725,3 +749,94 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+
+/*
+ * mvebu_pinctrl_simple_mmio_probe - probe a simple mmio pinctrl
+ * @pdev: platform device (with platform data already attached)
+ *
+ * Initialise a simple (single base address) mmio pinctrl driver,
+ * assigning the MMIO base address to all mvebu mpp ctrl instances.
+ */
+int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mpp_data = devm_kcalloc(&pdev->dev, soc->ncontrols, sizeof(*mpp_data),
+ GFP_KERNEL);
+ if (!mpp_data)
+ return -ENOMEM;
+
+ for (i = 0; i < soc->ncontrols; i++)
+ mpp_data[i].base = base;
+
+ soc->control_data = mpp_data;
+
+ return mvebu_pinctrl_probe(pdev);
+}
+
+int mvebu_regmap_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long *config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned int val;
+ int err;
+
+ err = regmap_read(data->regmap.map, data->regmap.offset + off, &val);
+ if (err)
+ return err;
+
+ *config = (val >> shift) & MVEBU_MPP_MASK;
+
+ return 0;
+}
+
+int mvebu_regmap_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned int pid, unsigned long config)
+{
+ unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+ unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+ return regmap_update_bits(data->regmap.map, data->regmap.offset + off,
+ MVEBU_MPP_MASK << shift, config << shift);
+}
+
+int mvebu_pinctrl_simple_regmap_probe(struct platform_device *pdev,
+ struct device *syscon_dev)
+{
+ struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ struct regmap *regmap;
+ u32 offset;
+ int i;
+
+ regmap = syscon_node_to_regmap(syscon_dev->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset", &offset))
+ return -EINVAL;
+
+ mpp_data = devm_kcalloc(&pdev->dev, soc->ncontrols, sizeof(*mpp_data),
+ GFP_KERNEL);
+ if (!mpp_data)
+ return -ENOMEM;
+
+ for (i = 0; i < soc->ncontrols; i++) {
+ mpp_data[i].regmap.map = regmap;
+ mpp_data[i].regmap.offset = offset;
+ }
+
+ soc->control_data = mpp_data;
+
+ return mvebu_pinctrl_probe(pdev);
+}
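[Illustrative note, not part of the patch] mvebu_pinctrl_simple_regmap_probe() is the regmap-backed counterpart of the MMIO probe above: it pulls a regmap from the given syscon device's OF node and a register offset from an "offset" property on the pinctrl node. A hedged sketch of a hypothetical caller follows, assuming controls wired to mvebu_regmap_mpp_ctrl_get/_set and a pinctrl node placed under the syscon so the parent device can be passed in.

#include <linux/platform_device.h>

#include "pinctrl-mvebu.h"

static struct mvebu_pinctrl_soc_info bar_pinctrl_info;  /* filled elsewhere */

static int bar_pinctrl_probe(struct platform_device *pdev)
{
        pdev->dev.platform_data = &bar_pinctrl_info;

        /* regmap comes from the parent syscon, offset from the DT "offset" */
        return mvebu_pinctrl_simple_regmap_probe(pdev, pdev->dev.parent);
}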
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index b75a5f4adf3b..c90704e74884 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -14,6 +14,22 @@
#define __PINCTRL_MVEBU_H__
/**
+ * struct mvebu_mpp_ctrl_data - private data for the mpp ctrl operations
+ * @base: base address of pinctrl hardware
+ * @regmap.map: regmap structure
+ * @regmap.offset: regmap offset
+ */
+struct mvebu_mpp_ctrl_data {
+ union {
+ void __iomem *base;
+ struct {
+ struct regmap *map;
+ u32 offset;
+ } regmap;
+ };
+};
+
+/**
* struct mvebu_mpp_ctrl - describe a mpp control
* @name: name of the control group
* @pid: first pin id handled by this control
@@ -37,10 +53,13 @@ struct mvebu_mpp_ctrl {
u8 pid;
u8 npins;
unsigned *pins;
- int (*mpp_get)(unsigned pid, unsigned long *config);
- int (*mpp_set)(unsigned pid, unsigned long config);
- int (*mpp_gpio_req)(unsigned pid);
- int (*mpp_gpio_dir)(unsigned pid, bool input);
+ int (*mpp_get)(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config);
+ int (*mpp_set)(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config);
+ int (*mpp_gpio_req)(struct mvebu_mpp_ctrl_data *data, unsigned pid);
+ int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ bool input);
};
/**
@@ -93,6 +112,7 @@ struct mvebu_mpp_mode {
* struct mvebu_pinctrl_soc_info - SoC specific info passed to pinctrl-mvebu
* @variant: variant mask of soc_info
* @controls: list of available mvebu_mpp_ctrls
+ * @control_data: optional array, one entry for each control
* @ncontrols: number of available mvebu_mpp_ctrls
* @modes: list of available mvebu_mpp_modes
* @nmodes: number of available mvebu_mpp_modes
@@ -105,7 +125,8 @@ struct mvebu_mpp_mode {
*/
struct mvebu_pinctrl_soc_info {
u8 variant;
- struct mvebu_mpp_ctrl *controls;
+ const struct mvebu_mpp_ctrl *controls;
+ struct mvebu_mpp_ctrl_data *control_data;
int ncontrols;
struct mvebu_mpp_mode *modes;
int nmodes;
@@ -177,30 +198,18 @@ struct mvebu_pinctrl_soc_info {
#define MVEBU_MPP_BITS 4
#define MVEBU_MPP_MASK 0xf
-static inline int default_mpp_ctrl_get(void __iomem *base, unsigned int pid,
- unsigned long *config)
-{
- unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
-
- *config = (readl(base + off) >> shift) & MVEBU_MPP_MASK;
-
- return 0;
-}
-
-static inline int default_mpp_ctrl_set(void __iomem *base, unsigned int pid,
- unsigned long config)
-{
- unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
- unsigned long reg;
-
- reg = readl(base + off) & ~(MVEBU_MPP_MASK << shift);
- writel(reg | (config << shift), base + off);
-
- return 0;
-}
+int mvebu_mmio_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config);
+int mvebu_mmio_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config);
+int mvebu_regmap_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long *config);
+int mvebu_regmap_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
+ unsigned long config);
int mvebu_pinctrl_probe(struct platform_device *pdev);
+int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev);
+int mvebu_pinctrl_simple_regmap_probe(struct platform_device *pdev,
+ struct device *syscon_dev);
#endif
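[Illustrative note, not part of the patch] The header change above threads a struct mvebu_mpp_ctrl_data pointer through every mpp callback, so per-control state (an MMIO base, or a regmap plus offset) replaces the old file-scope mpp_base globals. A custom control op pair under the new prototypes looks like the sketch below, modelled on the Dove PMU handlers earlier in this series; the "baz" names and register offset are hypothetical.

#include <linux/io.h>

#include "pinctrl-mvebu.h"

#define BAZ_CTRL_REG    0x40    /* hypothetical register holding the mux bits */

static int baz_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
                            unsigned pid, unsigned long *config)
{
        unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;

        *config = (readl(data->base + BAZ_CTRL_REG) >> shift) & MVEBU_MPP_MASK;

        return 0;
}

static int baz_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
                            unsigned pid, unsigned long config)
{
        unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
        unsigned long reg;

        reg = readl(data->base + BAZ_CTRL_REG) & ~(MVEBU_MPP_MASK << shift);
        writel(reg | (config << shift), data->base + BAZ_CTRL_REG);

        return 0;
}

/* MPP_FUNC_CTRL() pairs this with baz_mpp_ctrl_get/_set, following the
 * dove_pmu_mpp_ctrl pattern above */
static const struct mvebu_mpp_ctrl baz_mpp_controls[] = {
        MPP_FUNC_CTRL(0, 7, NULL, baz_mpp_ctrl),
};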
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 84e144167b44..c2e0c16cf9b3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -32,7 +32,8 @@
static void __iomem *mpp_base;
static void __iomem *high_mpp_base;
-static int orion_mpp_ctrl_get(unsigned pid, unsigned long *config)
+static int orion_mpp_ctrl_get(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long *config)
{
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
@@ -47,7 +48,8 @@ static int orion_mpp_ctrl_get(unsigned pid, unsigned long *config)
return 0;
}
-static int orion_mpp_ctrl_set(unsigned pid, unsigned long config)
+static int orion_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
+ unsigned pid, unsigned long config)
{
unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
@@ -161,7 +163,7 @@ static struct mvebu_mpp_mode orion_mpp_modes[] = {
MPP_VAR_FUNCTION(0x5, "gpio", NULL, V_5182)),
};
-static struct mvebu_mpp_ctrl orion_mpp_controls[] = {
+static const struct mvebu_mpp_ctrl orion_mpp_controls[] = {
MPP_FUNC_CTRL(0, 19, NULL, orion_mpp_ctrl),
};
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 799048f3c8d4..c1c1ccc58267 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -200,6 +200,18 @@ int pinconf_apply_setting(struct pinctrl_setting const *setting)
return 0;
}
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, size_t nconfigs)
+{
+ const struct pinconf_ops *ops;
+
+ ops = pctldev->desc->confops;
+ if (!ops)
+ return -ENOTSUPP;
+
+ return ops->pin_config_set(pctldev, pin, configs, nconfigs);
+}
+
#ifdef CONFIG_DEBUG_FS
static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index 55c75780b3b2..bf8aff9abf32 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -20,6 +20,9 @@ int pinconf_map_to_setting(struct pinctrl_map const *map,
void pinconf_free_setting(struct pinctrl_setting const *setting);
int pinconf_apply_setting(struct pinctrl_setting const *setting);
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, size_t nconfigs);
+
/*
* You will only be interested in these if you're using PINCONF
* so don't supply any stubs for these.
@@ -56,6 +59,12 @@ static inline int pinconf_apply_setting(struct pinctrl_setting const *setting)
return 0;
}
+static inline int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, size_t nconfigs)
+{
+ return -ENOTSUPP;
+}
+
#endif
#if defined(CONFIG_PINCONF) && defined(CONFIG_DEBUG_FS)
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index aea310a91821..d69e357a7a98 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -164,6 +164,18 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
return ret;
}
+static int amd_gpio_set_config(struct gpio_chip *gc, unsigned offset,
+ unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return amd_gpio_set_debounce(gc, offset, debounce);
+}
+
#ifdef CONFIG_DEBUG_FS
static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
@@ -186,7 +198,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *output_value;
char *output_enable;
- for (bank = 0; bank < AMD_GPIO_TOTAL_BANKS; bank++) {
+ for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
seq_printf(s, "GPIO bank%d\t", bank);
switch (bank) {
@@ -202,8 +214,14 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
i = 128;
pin_num = AMD_GPIO_PINS_BANK2 + i;
break;
+ case 3:
+ i = 192;
+ pin_num = AMD_GPIO_PINS_BANK3 + i;
+ break;
+ default:
+ /* Illegal bank number, ignore */
+ continue;
}
-
for (; i < pin_num; i++) {
seq_printf(s, "pin%d\t", i);
spin_lock_irqsave(&gpio_dev->lock, flags);
@@ -213,14 +231,14 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
if (pin_reg & BIT(INTERRUPT_ENABLE_OFF)) {
interrupt_enable = "interrupt is enabled|";
- if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF))
- && !(pin_reg & BIT(ACTIVE_LEVEL_OFF+1)))
+ if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
+ !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
active_level = "Active low|";
- else if (pin_reg & BIT(ACTIVE_LEVEL_OFF)
- && !(pin_reg & BIT(ACTIVE_LEVEL_OFF+1)))
+ else if (pin_reg & BIT(ACTIVE_LEVEL_OFF) &&
+ !(pin_reg & BIT(ACTIVE_LEVEL_OFF + 1)))
active_level = "Active high|";
- else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF))
- && pin_reg & BIT(ACTIVE_LEVEL_OFF+1))
+ else if (!(pin_reg & BIT(ACTIVE_LEVEL_OFF)) &&
+ pin_reg & BIT(ACTIVE_LEVEL_OFF + 1))
active_level = "Active on both|";
else
active_level = "Unknow Active level|";
@@ -244,17 +262,17 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
interrupt_mask =
"interrupt is masked|";
- if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ if (pin_reg & BIT(WAKE_CNTRL_OFF_S0I3))
wake_cntrl0 = "enable wakeup in S0i3 state|";
else
wake_cntrl0 = "disable wakeup in S0i3 state|";
- if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ if (pin_reg & BIT(WAKE_CNTRL_OFF_S3))
wake_cntrl1 = "enable wakeup in S3 state|";
else
wake_cntrl1 = "disable wakeup in S3 state|";
- if (pin_reg & BIT(WAKE_CNTRL_OFF))
+ if (pin_reg & BIT(WAKE_CNTRL_OFF_S4))
wake_cntrl2 = "enable wakeup in S4/S5 state|";
else
wake_cntrl2 = "disable wakeup in S4/S5 state|";
@@ -382,26 +400,21 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
int ret = 0;
u32 pin_reg;
- unsigned long flags;
- bool level_trig;
- u32 active_level;
+ unsigned long flags, irq_flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
- /*
- * When level_trig is set EDGE and active_level is set HIGH in BIOS
- * default settings, ignore incoming settings from client and use
- * BIOS settings to configure GPIO register.
+ /* Ignore the settings coming from the client and
+ * read the values from the ACPI tables
+ * while setting the trigger type
*/
- level_trig = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF));
- active_level = pin_reg & (ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
- if(level_trig &&
- ((active_level >> ACTIVE_LEVEL_OFF) == ACTIVE_HIGH))
- type = IRQ_TYPE_EDGE_FALLING;
+ irq_flags = irq_get_trigger_type(d->irq);
+ if (irq_flags != IRQ_TYPE_NONE)
+ type = irq_flags;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -479,6 +492,7 @@ static struct irq_chip amd_gpio_irqchip = {
.irq_unmask = amd_gpio_irq_unmask,
.irq_eoi = amd_gpio_irq_eoi,
.irq_set_type = amd_gpio_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static void amd_gpio_irq_handler(struct irq_desc *desc)
@@ -761,18 +775,19 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.direction_output = amd_gpio_direction_output;
gpio_dev->gc.get = amd_gpio_get_value;
gpio_dev->gc.set = amd_gpio_set_value;
- gpio_dev->gc.set_debounce = amd_gpio_set_debounce;
+ gpio_dev->gc.set_config = amd_gpio_set_config;
gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
- gpio_dev->gc.base = 0;
+ gpio_dev->gc.base = -1;
gpio_dev->gc.label = pdev->name;
gpio_dev->gc.owner = THIS_MODULE;
gpio_dev->gc.parent = &pdev->dev;
- gpio_dev->gc.ngpio = TOTAL_NUMBER_OF_PINS;
+ gpio_dev->gc.ngpio = resource_size(res) / 4;
#if defined(CONFIG_OF_GPIO)
gpio_dev->gc.of_node = pdev->dev.of_node;
#endif
+ gpio_dev->hwbank_num = gpio_dev->gc.ngpio / 64;
gpio_dev->groups = kerncz_groups;
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
@@ -789,7 +804,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
return ret;
ret = gpiochip_add_pin_range(&gpio_dev->gc, dev_name(&pdev->dev),
- 0, 0, TOTAL_NUMBER_OF_PINS);
+ 0, 0, gpio_dev->gc.ngpio);
if (ret) {
dev_err(&pdev->dev, "Failed to add pin range\n");
goto out2;
@@ -810,7 +825,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
&amd_gpio_irqchip,
irq_base,
amd_gpio_irq_handler);
-
platform_set_drvdata(pdev, gpio_dev);
dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
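[Illustrative note, not part of the patch] The AMD probe above now registers gc.set_config in place of the removed gc.set_debounce, and sizes ngpio and the pin range from the MMIO resource instead of a fixed pin count. With set_config, the gpio core hands the driver a packed generic-pinconf value, which amd_gpio_set_config() unpacks. A minimal sketch of the calling side is shown below; the helper itself is hypothetical, and in the kernel proper gpiolib does this packing when a consumer calls gpiod_set_debounce().

#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinconf-generic.h>

/* hypothetical: request a 5 ms debounce on one line of a gpio_chip */
static int example_request_debounce(struct gpio_chip *gc, unsigned int offset)
{
        unsigned long config;

        if (!gc->set_config)
                return -ENOTSUPP;

        /* pack (PIN_CONFIG_INPUT_DEBOUNCE, microseconds) into one word */
        config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);

        return gc->set_config(gc, offset, config);
}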
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 7bfea47dbb47..c03f77822069 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -13,13 +13,12 @@
#ifndef _PINCTRL_AMD_H
#define _PINCTRL_AMD_H
-#define TOTAL_NUMBER_OF_PINS 192
#define AMD_GPIO_PINS_PER_BANK 64
-#define AMD_GPIO_TOTAL_BANKS 3
#define AMD_GPIO_PINS_BANK0 63
#define AMD_GPIO_PINS_BANK1 64
#define AMD_GPIO_PINS_BANK2 56
+#define AMD_GPIO_PINS_BANK3 32
#define WAKE_INT_MASTER_REG 0xfc
#define EOI_MASK (1 << 29)
@@ -35,7 +34,9 @@
#define ACTIVE_LEVEL_OFF 9
#define INTERRUPT_ENABLE_OFF 11
#define INTERRUPT_MASK_OFF 12
-#define WAKE_CNTRL_OFF 13
+#define WAKE_CNTRL_OFF_S0I3 13
+#define WAKE_CNTRL_OFF_S3 14
+#define WAKE_CNTRL_OFF_S4 15
#define PIN_STS_OFF 16
#define DRV_STRENGTH_SEL_OFF 17
#define PULL_UP_SEL_OFF 19
@@ -93,6 +94,7 @@ struct amd_gpio {
u32 ngroups;
struct pinctrl_dev *pctrl;
struct gpio_chip gc;
+ unsigned int hwbank_num;
struct resource *res;
struct platform_device *pdev;
};
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index b36a90a3f3e4..f41d3d948dd8 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -113,7 +113,6 @@ static int da850_pupd_pin_config_group_set(struct pinctrl_dev *pctldev,
struct da850_pupd_data *data = pinctrl_dev_get_drvdata(pctldev);
u32 ena, sel;
enum pin_config_param param;
- u16 arg;
int i;
ena = readl(data->base + DA850_PUPD_ENA);
@@ -121,7 +120,6 @@ static int da850_pupd_pin_config_group_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
- arg = pinconf_to_config_argument(configs[i]);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -194,6 +192,7 @@ static const struct of_device_id da850_pupd_of_match[] = {
{ .compatible = "ti,da850-pupd" },
{ }
};
+MODULE_DEVICE_TABLE(of, da850_pupd_of_match);
static struct platform_driver da850_pupd_driver = {
.driver = {
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 0b0fc2eb48e0..fb73dcbb5ef3 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -7,7 +7,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/gpio.h>
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index a4d647424600..41dc39c7a7b1 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/module.h>
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
index e137d139e494..0e4308b8f235 100644
--- a/drivers/pinctrl/pinctrl-lantiq.h
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#ifndef __PINCTRL_LANTIQ_H
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index e053f1fa5512..d090f37ca4a1 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -904,7 +904,7 @@ static int lpc18xx_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
enum pin_config_param param,
- u16 param_val, u32 *reg)
+ u32 param_val, u32 *reg)
{
switch (param) {
case PIN_CONFIG_LOW_POWER_MODE:
@@ -932,7 +932,7 @@ static int lpc18xx_pconf_set_usb1(struct pinctrl_dev *pctldev,
static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
enum pin_config_param param,
- u16 param_val, u32 *reg,
+ u32 param_val, u32 *reg,
unsigned pin)
{
u8 shift;
@@ -982,7 +982,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
}
static int lpc18xx_pconf_set_gpio_pin_int(struct pinctrl_dev *pctldev,
- u16 param_val, unsigned pin)
+ u32 param_val, unsigned pin)
{
struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
u32 val, reg_val, reg_offset = LPC18XX_SCU_PINTSEL0;
@@ -1008,7 +1008,7 @@ static int lpc18xx_pconf_set_gpio_pin_int(struct pinctrl_dev *pctldev,
}
static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param,
- u16 param_val, u32 *reg, unsigned pin,
+ u32 param_val, u32 *reg, unsigned pin,
struct lpc18xx_pin_caps *pin_cap)
{
switch (param) {
@@ -1088,7 +1088,7 @@ static int lpc18xx_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
struct lpc18xx_pin_caps *pin_cap;
enum pin_config_param param;
- u16 param_val;
+ u32 param_val;
u32 reg;
int ret;
int i;
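[Illustrative note, not part of the patch] The lpc18xx hunks above, like the max77620, palmas, rockchip and pinctrl-single changes that follow, widen the local that receives the pinconf argument from u16 to u32, presumably to match the u32 now returned by pinconf_to_config_argument() and avoid silently truncating large arguments. The shared unpack pattern in a pin_config_set implementation is roughly the sketch below; the hardware programming step is omitted.

#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/printk.h>

static int example_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
                                  unsigned long *configs, unsigned num_configs)
{
        enum pin_config_param param;
        u32 arg;        /* was u16 in several drivers before this series */
        unsigned i;

        for (i = 0; i < num_configs; i++) {
                param = pinconf_to_config_param(configs[i]);
                arg = pinconf_to_config_argument(configs[i]);

                /* apply (param, arg) to the hardware for this pin here */
                pr_debug("pin %u: param %d arg %u\n", pin, param, arg);
        }

        return 0;
}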
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index d9ff53e8f715..b8d2180a2bea 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -402,7 +402,7 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
struct device *dev = mpci->dev;
struct max77620_fps_config *fps_config;
int param;
- u16 param_val;
+ u32 param_val;
unsigned int val;
unsigned int pu_val;
unsigned int pd_val;
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index a30146da7ffd..4d6a5015b927 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -860,7 +860,7 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
{
struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 param_val;
+ u32 param_val;
const struct palmas_pingroup *g;
const struct palmas_pin_info *opt;
int ret;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 08765f58253c..7813599e43fa 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1441,7 +1441,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
int i;
int rc;
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a5a0392ab817..8b2d45e85bae 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -33,27 +33,12 @@
#include "core.h"
#include "devicetree.h"
#include "pinconf.h"
+#include "pinmux.h"
#define DRIVER_NAME "pinctrl-single"
#define PCS_OFF_DISABLED ~0U
/**
- * struct pcs_pingroup - pingroups for a function
- * @np: pingroup device node pointer
- * @name: pingroup name
- * @gpins: array of the pins in the group
- * @ngpins: number of pins in the group
- * @node: list node
- */
-struct pcs_pingroup {
- struct device_node *np;
- const char *name;
- int *gpins;
- int ngpins;
- struct list_head node;
-};
-
-/**
* struct pcs_func_vals - mux function register offset and value pair
* @reg: register virtual address
* @val: register value
@@ -176,16 +161,10 @@ struct pcs_soc_data {
* @bits_per_mux: number of bits per mux
* @bits_per_pin: number of bits per pin
* @pins: physical pins on the SoC
- * @pgtree: pingroup index radix tree
- * @ftree: function index radix tree
- * @pingroups: list of pingroups
- * @functions: list of functions
* @gpiofuncs: list of gpio functions
* @irqs: list of interrupt registers
* @chip: chip container for this instance
* @domain: IRQ domain for this instance
- * @ngroups: number of pingroups
- * @nfuncs: number of functions
* @desc: pin controller descriptor
* @read: register read function to use
* @write: register write function to use
@@ -213,16 +192,10 @@ struct pcs_device {
bool bits_per_mux;
unsigned bits_per_pin;
struct pcs_data pins;
- struct radix_tree_root pgtree;
- struct radix_tree_root ftree;
- struct list_head pingroups;
- struct list_head functions;
struct list_head gpiofuncs;
struct list_head irqs;
struct irq_chip chip;
struct irq_domain *domain;
- unsigned ngroups;
- unsigned nfuncs;
struct pinctrl_desc desc;
unsigned (*read)(void __iomem *reg);
void (*write)(unsigned val, void __iomem *reg);
@@ -288,54 +261,6 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
writel(val, reg);
}
-static int pcs_get_groups_count(struct pinctrl_dev *pctldev)
-{
- struct pcs_device *pcs;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
-
- return pcs->ngroups;
-}
-
-static const char *pcs_get_group_name(struct pinctrl_dev *pctldev,
- unsigned gselector)
-{
- struct pcs_device *pcs;
- struct pcs_pingroup *group;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- group = radix_tree_lookup(&pcs->pgtree, gselector);
- if (!group) {
- dev_err(pcs->dev, "%s could not find pingroup%i\n",
- __func__, gselector);
- return NULL;
- }
-
- return group->name;
-}
-
-static int pcs_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned gselector,
- const unsigned **pins,
- unsigned *npins)
-{
- struct pcs_device *pcs;
- struct pcs_pingroup *group;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- group = radix_tree_lookup(&pcs->pgtree, gselector);
- if (!group) {
- dev_err(pcs->dev, "%s could not find pingroup%i\n",
- __func__, gselector);
- return -EINVAL;
- }
-
- *pins = group->gpins;
- *npins = group->ngpins;
-
- return 0;
-}
-
static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned pin)
@@ -369,67 +294,21 @@ static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map, unsigned *num_maps);
static const struct pinctrl_ops pcs_pinctrl_ops = {
- .get_groups_count = pcs_get_groups_count,
- .get_group_name = pcs_get_group_name,
- .get_group_pins = pcs_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.pin_dbg_show = pcs_pin_dbg_show,
.dt_node_to_map = pcs_dt_node_to_map,
.dt_free_map = pcs_dt_free_map,
};
-static int pcs_get_functions_count(struct pinctrl_dev *pctldev)
-{
- struct pcs_device *pcs;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
-
- return pcs->nfuncs;
-}
-
-static const char *pcs_get_function_name(struct pinctrl_dev *pctldev,
- unsigned fselector)
-{
- struct pcs_device *pcs;
- struct pcs_function *func;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- func = radix_tree_lookup(&pcs->ftree, fselector);
- if (!func) {
- dev_err(pcs->dev, "%s could not find function%i\n",
- __func__, fselector);
- return NULL;
- }
-
- return func->name;
-}
-
-static int pcs_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned fselector,
- const char * const **groups,
- unsigned * const ngroups)
-{
- struct pcs_device *pcs;
- struct pcs_function *func;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- func = radix_tree_lookup(&pcs->ftree, fselector);
- if (!func) {
- dev_err(pcs->dev, "%s could not find function%i\n",
- __func__, fselector);
- return -EINVAL;
- }
- *groups = func->pgnames;
- *ngroups = func->npgnames;
-
- return 0;
-}
-
static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
struct pcs_function **func)
{
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pin_desc *pdesc = pin_desc_get(pctldev, pin);
const struct pinctrl_setting_mux *setting;
+ struct function_desc *function;
unsigned fselector;
/* If pin is not described in DTS & enabled, mux_setting is NULL. */
@@ -437,7 +316,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
if (!setting)
return -ENOTSUPP;
fselector = setting->func;
- *func = radix_tree_lookup(&pcs->ftree, fselector);
+ function = pinmux_generic_get_function(pctldev, fselector);
+ *func = function->data;
if (!(*func)) {
dev_err(pcs->dev, "%s could not find function%i\n",
__func__, fselector);
@@ -450,6 +330,7 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
unsigned group)
{
struct pcs_device *pcs;
+ struct function_desc *function;
struct pcs_function *func;
int i;
@@ -457,7 +338,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
/* If function mask is null, needn't enable it. */
if (!pcs->fmask)
return 0;
- func = radix_tree_lookup(&pcs->ftree, fselector);
+ function = pinmux_generic_get_function(pctldev, fselector);
+ func = function->data;
if (!func)
return -EINVAL;
@@ -515,9 +397,9 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
}
static const struct pinmux_ops pcs_pinmux_ops = {
- .get_functions_count = pcs_get_functions_count,
- .get_function_name = pcs_get_function_name,
- .get_function_groups = pcs_get_function_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
.set_mux = pcs_set_mux,
.gpio_request_enable = pcs_request_gpio,
};
@@ -622,7 +504,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pcs_function *func;
unsigned offset = 0, shift = 0, i, data, ret;
- u16 arg;
+ u32 arg;
int j;
ret = pcs_get_function(pctldev, pin, &func);
@@ -685,7 +567,7 @@ static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned npins, old = 0;
int i, ret;
- ret = pcs_get_group_pins(pctldev, group, &pins, &npins);
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
if (ret)
return ret;
for (i = 0; i < npins; i++) {
@@ -707,7 +589,7 @@ static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned npins;
int i, ret;
- ret = pcs_get_group_pins(pctldev, group, &pins, &npins);
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
if (ret)
return ret;
for (i = 0; i < npins; i++) {
@@ -859,77 +741,24 @@ static struct pcs_function *pcs_add_function(struct pcs_device *pcs,
unsigned npgnames)
{
struct pcs_function *function;
+ int res;
function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL);
if (!function)
return NULL;
- function->name = name;
function->vals = vals;
function->nvals = nvals;
- function->pgnames = pgnames;
- function->npgnames = npgnames;
- mutex_lock(&pcs->mutex);
- list_add_tail(&function->node, &pcs->functions);
- radix_tree_insert(&pcs->ftree, pcs->nfuncs, function);
- pcs->nfuncs++;
- mutex_unlock(&pcs->mutex);
+ res = pinmux_generic_add_function(pcs->pctl, name,
+ pgnames, npgnames,
+ function);
+ if (res)
+ return NULL;
return function;
}
-static void pcs_remove_function(struct pcs_device *pcs,
- struct pcs_function *function)
-{
- int i;
-
- mutex_lock(&pcs->mutex);
- for (i = 0; i < pcs->nfuncs; i++) {
- struct pcs_function *found;
-
- found = radix_tree_lookup(&pcs->ftree, i);
- if (found == function)
- radix_tree_delete(&pcs->ftree, i);
- }
- list_del(&function->node);
- mutex_unlock(&pcs->mutex);
-}
-
-/**
- * pcs_add_pingroup() - add a pingroup to the pingroup list
- * @pcs: pcs driver instance
- * @np: device node of the mux entry
- * @name: name of the pingroup
- * @gpins: array of the pins that belong to the group
- * @ngpins: number of pins in the group
- */
-static int pcs_add_pingroup(struct pcs_device *pcs,
- struct device_node *np,
- const char *name,
- int *gpins,
- int ngpins)
-{
- struct pcs_pingroup *pingroup;
-
- pingroup = devm_kzalloc(pcs->dev, sizeof(*pingroup), GFP_KERNEL);
- if (!pingroup)
- return -ENOMEM;
-
- pingroup->name = name;
- pingroup->np = np;
- pingroup->gpins = gpins;
- pingroup->ngpins = ngpins;
-
- mutex_lock(&pcs->mutex);
- list_add_tail(&pingroup->node, &pcs->pingroups);
- radix_tree_insert(&pcs->pgtree, pcs->ngroups, pingroup);
- pcs->ngroups++;
- mutex_unlock(&pcs->mutex);
-
- return 0;
-}
-
/**
* pcs_get_pin_by_offset() - get a pin index based on the register offset
* @pcs: pcs driver instance
@@ -1100,10 +929,9 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
return 0;
}
-static void pcs_free_pingroups(struct pcs_device *pcs);
-
/**
* smux_parse_one_pinctrl_entry() - parses a device tree mux entry
+ * @pctldev: pin controller device
* @pcs: pinctrl driver instance
* @np: device node of the mux entry
* @map: map entry
@@ -1134,7 +962,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
rows = pinctrl_count_index_with_args(np, name);
if (rows <= 0) {
- dev_err(pcs->dev, "Ivalid number of rows: %d\n", rows);
+ dev_err(pcs->dev, "Invalid number of rows: %d\n", rows);
return -EINVAL;
}
@@ -1186,7 +1014,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
goto free_pins;
}
- res = pcs_add_pingroup(pcs, np, np->name, pins, found);
+ res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
if (res < 0)
goto free_function;
@@ -1205,10 +1033,10 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
return 0;
free_pingroups:
- pcs_free_pingroups(pcs);
+ pinctrl_generic_remove_last_group(pcs->pctl);
*num_maps = 1;
free_function:
- pcs_remove_function(pcs, function);
+ pinmux_generic_remove_last_function(pcs->pctl);
free_pins:
devm_kfree(pcs->dev, pins);
@@ -1320,7 +1148,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
goto free_pins;
}
- res = pcs_add_pingroup(pcs, np, np->name, pins, found);
+ res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs);
if (res < 0)
goto free_function;
@@ -1337,11 +1165,10 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
return 0;
free_pingroups:
- pcs_free_pingroups(pcs);
+ pinctrl_generic_remove_last_group(pcs->pctl);
*num_maps = 1;
free_function:
- pcs_remove_function(pcs, function);
-
+ pinmux_generic_remove_last_function(pcs->pctl);
free_pins:
devm_kfree(pcs->dev, pins);
@@ -1409,60 +1236,6 @@ free_map:
}
/**
- * pcs_free_funcs() - free memory used by functions
- * @pcs: pcs driver instance
- */
-static void pcs_free_funcs(struct pcs_device *pcs)
-{
- struct list_head *pos, *tmp;
- int i;
-
- mutex_lock(&pcs->mutex);
- for (i = 0; i < pcs->nfuncs; i++) {
- struct pcs_function *func;
-
- func = radix_tree_lookup(&pcs->ftree, i);
- if (!func)
- continue;
- radix_tree_delete(&pcs->ftree, i);
- }
- list_for_each_safe(pos, tmp, &pcs->functions) {
- struct pcs_function *function;
-
- function = list_entry(pos, struct pcs_function, node);
- list_del(&function->node);
- }
- mutex_unlock(&pcs->mutex);
-}
-
-/**
- * pcs_free_pingroups() - free memory used by pingroups
- * @pcs: pcs driver instance
- */
-static void pcs_free_pingroups(struct pcs_device *pcs)
-{
- struct list_head *pos, *tmp;
- int i;
-
- mutex_lock(&pcs->mutex);
- for (i = 0; i < pcs->ngroups; i++) {
- struct pcs_pingroup *pingroup;
-
- pingroup = radix_tree_lookup(&pcs->pgtree, i);
- if (!pingroup)
- continue;
- radix_tree_delete(&pcs->pgtree, i);
- }
- list_for_each_safe(pos, tmp, &pcs->pingroups) {
- struct pcs_pingroup *pingroup;
-
- pingroup = list_entry(pos, struct pcs_pingroup, node);
- list_del(&pingroup->node);
- }
- mutex_unlock(&pcs->mutex);
-}
-
-/**
* pcs_irq_free() - free interrupt
* @pcs: pcs driver instance
*/
@@ -1490,8 +1263,7 @@ static void pcs_free_resources(struct pcs_device *pcs)
{
pcs_irq_free(pcs);
pinctrl_unregister(pcs->pctl);
- pcs_free_funcs(pcs);
- pcs_free_pingroups(pcs);
+
#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
if (pcs->missing_nr_pinctrl_cells)
of_remove_property(pcs->np, pcs->missing_nr_pinctrl_cells);
@@ -1885,8 +1657,6 @@ static int pcs_probe(struct platform_device *pdev)
pcs->np = np;
raw_spin_lock_init(&pcs->lock);
mutex_init(&pcs->mutex);
- INIT_LIST_HEAD(&pcs->pingroups);
- INIT_LIST_HEAD(&pcs->functions);
INIT_LIST_HEAD(&pcs->gpiofuncs);
soc = match->data;
pcs->flags = soc->flags;
@@ -1947,8 +1717,6 @@ static int pcs_probe(struct platform_device *pdev)
return -ENODEV;
}
- INIT_RADIX_TREE(&pcs->pgtree, GFP_KERNEL);
- INIT_RADIX_TREE(&pcs->ftree, GFP_KERNEL);
platform_set_drvdata(pdev, pcs);
switch (pcs->width) {
@@ -1979,10 +1747,9 @@ static int pcs_probe(struct platform_device *pdev)
if (ret < 0)
goto free;
- pcs->pctl = pinctrl_register(&pcs->desc, pcs->dev, pcs);
- if (IS_ERR(pcs->pctl)) {
+ ret = pinctrl_register_and_init(&pcs->desc, pcs->dev, pcs, &pcs->pctl);
+ if (ret) {
dev_err(pcs->dev, "could not register single pinctrl driver\n");
- ret = PTR_ERR(pcs->pctl);
goto free;
}
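
The pinctrl-single.c changes above drop the driver's private function/group bookkeeping (radix trees, lists and their lookups) in favour of the core's generic group and function helpers. As a rough illustration only (the example_* names are placeholders, not part of this patch), a driver built on these helpers registers a group and a function and unwinds with the *_remove_last_* helpers on error:

static int example_add_group_and_function(struct pinctrl_dev *pctl,
					  int *pins, int npins,
					  const char **grp_names,
					  unsigned int ngrps, void *drvdata)
{
	int res;

	/* Register the pin group with the pinctrl core. */
	res = pinctrl_generic_add_group(pctl, "example-grp", pins, npins,
					drvdata);
	if (res < 0)
		return res;

	/* Register the function that muxes onto that group. */
	res = pinmux_generic_add_function(pctl, "example-func", grp_names,
					  ngrps, drvdata);
	if (res < 0) {
		/* Roll back the group added above, as the pcs_parse_* paths now do. */
		pinctrl_generic_remove_last_group(pctl);
		return res;
	}

	return 0;
}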
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 29fb7403d24e..7450f5118445 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -424,41 +424,6 @@ static int sx150x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(value & BIT(offset));
}
-static int sx150x_gpio_set_single_ended(struct gpio_chip *chip,
- unsigned int offset,
- enum single_ended_mode mode)
-{
- struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
- int ret;
-
- switch (mode) {
- case LINE_MODE_PUSH_PULL:
- if (pctl->data->model != SX150X_789 ||
- sx150x_pin_is_oscio(pctl, offset))
- return 0;
-
- ret = regmap_write_bits(pctl->regmap,
- pctl->data->pri.x789.reg_drain,
- BIT(offset), 0);
- break;
-
- case LINE_MODE_OPEN_DRAIN:
- if (pctl->data->model != SX150X_789 ||
- sx150x_pin_is_oscio(pctl, offset))
- return -ENOTSUPP;
-
- ret = regmap_write_bits(pctl->regmap,
- pctl->data->pri.x789.reg_drain,
- BIT(offset), BIT(offset));
- break;
- default:
- ret = -ENOTSUPP;
- break;
- }
-
- return ret;
-}
-
static int __sx150x_gpio_set(struct sx150x_pinctrl *pctl, unsigned int offset,
int value)
{
@@ -811,16 +776,26 @@ static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- ret = sx150x_gpio_set_single_ended(&pctl->gpio,
- pin, LINE_MODE_OPEN_DRAIN);
+ if (pctl->data->model != SX150X_789 ||
+ sx150x_pin_is_oscio(pctl, pin))
+ return -ENOTSUPP;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ BIT(pin), BIT(pin));
if (ret < 0)
return ret;
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- ret = sx150x_gpio_set_single_ended(&pctl->gpio,
- pin, LINE_MODE_PUSH_PULL);
+ if (pctl->data->model != SX150X_789 ||
+ sx150x_pin_is_oscio(pctl, pin))
+ return 0;
+
+ ret = regmap_write_bits(pctl->regmap,
+ pctl->data->pri.x789.reg_drain,
+ BIT(pin), 0);
if (ret < 0)
return ret;
@@ -1178,7 +1153,7 @@ static int sx150x_probe(struct i2c_client *client,
pctl->gpio.direction_output = sx150x_gpio_direction_output;
pctl->gpio.get = sx150x_gpio_get;
pctl->gpio.set = sx150x_gpio_set;
- pctl->gpio.set_single_ended = sx150x_gpio_set_single_ended;
+ pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
#ifdef CONFIG_OF_GPIO
pctl->gpio.of_node = dev->of_node;
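
With set_single_ended() gone, open-drain and push-pull handling lives only in sx150x_pinconf_set(), and GPIO consumers reach it through the chip's new .set_config hook. A minimal sketch of that wiring, assuming the usual gpiolib helpers (the example_ template below is illustrative, not taken from this driver):

/*
 * gpiochip_generic_config() forwards the packed PIN_CONFIG_* value to the
 * backing pin controller, so drive-mode requests end up in pinconf_set.
 */
static const struct gpio_chip example_chip_template = {
	.label		= "example-expander",
	.request	= gpiochip_generic_request,
	.free		= gpiochip_generic_free,
	.set_config	= gpiochip_generic_config,
};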
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index dd85ad1807f5..d4167e2c173a 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* publishhed by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2015 Martin Schiller <mschiller@tdt.de>
*/
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index ece702881946..29ad3151abec 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -99,37 +99,24 @@ static int pin_request(struct pinctrl_dev *pctldev,
dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
pin, desc->name, owner);
- if (gpio_range) {
- /* There's no need to support multiple GPIO requests */
- if (desc->gpio_owner) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->gpio_owner, owner);
- goto out;
- }
- if (ops->strict && desc->mux_usecount &&
- strcmp(desc->mux_owner, owner)) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->mux_owner, owner);
- goto out;
- }
+ if ((!gpio_range || ops->strict) &&
+ desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
+ dev_err(pctldev->dev,
+ "pin %s already requested by %s; cannot claim for %s\n",
+ desc->name, desc->mux_owner, owner);
+ goto out;
+ }
+ if ((gpio_range || ops->strict) && desc->gpio_owner) {
+ dev_err(pctldev->dev,
+ "pin %s already requested by %s; cannot claim for %s\n",
+ desc->name, desc->gpio_owner, owner);
+ goto out;
+ }
+
+ if (gpio_range) {
desc->gpio_owner = owner;
} else {
- if (desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->mux_owner, owner);
- goto out;
- }
- if (ops->strict && desc->gpio_owner) {
- dev_err(pctldev->dev,
- "pin %s already requested by %s; cannot claim for %s\n",
- desc->name, desc->gpio_owner, owner);
- goto out;
- }
-
desc->mux_usecount++;
if (desc->mux_usecount > 1)
return 0;
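
The rewritten check above collapses four nearly identical branches into two: a mux-owner conflict matters whenever the request is not for GPIO or the controller is strict, and a GPIO-owner conflict matters whenever the request is for GPIO or the controller is strict. A compact restatement of that predicate, purely for illustration (names are placeholders):

/* Same decision logic as the hunk above, written as a standalone helper. */
static bool example_request_conflicts(bool gpio_range, bool strict,
				      bool mux_owned_by_other, bool gpio_owned)
{
	if ((!gpio_range || strict) && mux_owned_by_other)
		return true;		/* pin already muxed by someone else */
	if ((gpio_range || strict) && gpio_owned)
		return true;		/* pin already claimed as GPIO */
	return false;
}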
@@ -695,3 +682,176 @@ void pinmux_init_device_debugfs(struct dentry *devroot,
}
#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+
+/**
+ * pinmux_generic_get_function_count() - returns number of functions
+ * @pctldev: pin controller device
+ */
+int pinmux_generic_get_function_count(struct pinctrl_dev *pctldev)
+{
+ return pctldev->num_functions;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function_count);
+
+/**
+ * pinmux_generic_get_function_name() - returns the function name
+ * @pctldev: pin controller device
+ * @selector: function number
+ */
+const char *
+pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function)
+ return NULL;
+
+ return function->name;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
+
+/**
+ * pinmux_generic_get_function_groups() - gets the function groups
+ * @pctldev: pin controller device
+ * @selector: function number
+ * @groups: array of pin groups
+ * @num_groups: number of pin groups
+ */
+int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function) {
+ dev_err(pctldev->dev, "%s could not find function%i\n",
+ __func__, selector);
+ return -EINVAL;
+ }
+ *groups = function->group_names;
+ *num_groups = function->num_group_names;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function_groups);
+
+/**
+ * pinmux_generic_get_function() - returns a function based on the number
+ * @pctldev: pin controller device
+ * @selector: function number
+ */
+struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function)
+ return NULL;
+
+ return function;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
+
+/**
+ * pinmux_generic_add_function() - adds a function
+ * @pctldev: pin controller device
+ * @name: name of the function
+ * @groups: array of pin group names
+ * @num_groups: number of pin group names
+ * @data: pin controller driver specific data
+ */
+int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
+ const char *name,
+ const char **groups,
+ const unsigned int num_groups,
+ void *data)
+{
+ struct function_desc *function;
+
+ function = devm_kzalloc(pctldev->dev, sizeof(*function), GFP_KERNEL);
+ if (!function)
+ return -ENOMEM;
+
+ function->name = name;
+ function->group_names = groups;
+ function->num_group_names = num_groups;
+ function->data = data;
+
+ radix_tree_insert(&pctldev->pin_function_tree, pctldev->num_functions,
+ function);
+
+ pctldev->num_functions++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_add_function);
+
+/**
+ * pinmux_generic_remove_function() - removes a numbered function
+ * @pctldev: pin controller device
+ * @selector: function number
+ *
+ * Note that the caller must take care of locking.
+ */
+int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ selector);
+ if (!function)
+ return -ENOENT;
+
+ radix_tree_delete(&pctldev->pin_function_tree, selector);
+ devm_kfree(pctldev->dev, function);
+
+ pctldev->num_functions--;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
+
+/**
+ * pinmux_generic_free_functions() - removes all functions
+ * @pctldev: pin controller device
+ *
+ * Note that the caller must take care of locking.
+ */
+void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
+{
+ struct radix_tree_iter iter;
+ struct function_desc *function;
+ unsigned long *indices;
+ void **slot;
+ int i = 0;
+
+ indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
+ pctldev->num_functions, GFP_KERNEL);
+ if (!indices)
+ return;
+
+ radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
+ indices[i++] = iter.index;
+
+ for (i = 0; i < pctldev->num_functions; i++) {
+ function = radix_tree_lookup(&pctldev->pin_function_tree,
+ indices[i]);
+ radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
+ devm_kfree(pctldev->dev, function);
+ }
+
+ pctldev->num_functions = 0;
+}
+
+#endif /* CONFIG_GENERIC_PINMUX_FUNCTIONS */
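
The new GENERIC_PINMUX_FUNCTIONS helpers keep function descriptors in pctldev->pin_function_tree, keyed by selector, so drivers no longer need their own lookup structures. Assuming access to the driver-internal pinmux.h header, a driver can point its pinmux_ops straight at them, roughly (the example_* names are illustrative):

/* Driver-specific mux programming; only a stub here. */
static int example_set_mux(struct pinctrl_dev *pctldev, unsigned int func_sel,
			   unsigned int group_sel)
{
	/* Program the mux registers for this function/group pair. */
	return 0;
}

/* Sketch: pinmux_ops backed by the generic function helpers. */
static const struct pinmux_ops example_pmxops = {
	.get_functions_count	= pinmux_generic_get_function_count,
	.get_function_name	= pinmux_generic_get_function_name,
	.get_function_groups	= pinmux_generic_get_function_groups,
	.set_mux		= example_set_mux,
};

Only the enumeration callbacks come for free; the actual register writes stay driver specific.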
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index d1a98b1c9fce..248d8ea30e26 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -111,3 +111,59 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
}
#endif
+
+#ifdef CONFIG_GENERIC_PINMUX_FUNCTIONS
+
+/**
+ * struct function_desc - generic function descriptor
+ * @name: name of the function
+ * @group_names: array of pin group names
+ * @num_group_names: number of pin group names
+ * @data: pin controller driver specific data
+ */
+struct function_desc {
+ const char *name;
+ const char **group_names;
+ int num_group_names;
+ void *data;
+};
+
+int pinmux_generic_get_function_count(struct pinctrl_dev *pctldev);
+
+const char *
+pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
+int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups);
+
+struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
+int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
+ const char *name,
+ const char **groups,
+ unsigned const num_groups,
+ void *data);
+
+int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
+static inline int
+pinmux_generic_remove_last_function(struct pinctrl_dev *pctldev)
+{
+ return pinmux_generic_remove_function(pctldev,
+ pctldev->num_functions - 1);
+}
+
+void pinmux_generic_free_functions(struct pinctrl_dev *pctldev);
+
+#else
+
+static inline void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
+{
+}
+
+#endif /* CONFIG_GENERIC_PINMUX_FUNCTIONS */
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 775c88303017..f8e9e1c2b2f6 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -61,7 +61,7 @@ struct msm_pinctrl {
struct notifier_block restart_nb;
int irq;
- spinlock_t lock;
+ raw_spinlock_t lock;
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
@@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~mask;
val |= i << g->mux_bit;
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT:
/* set output value */
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (arg)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
writel(val, pctrl->regs + g->io_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
/* enable output */
arg = 1;
@@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~(mask << bit);
val |= arg << bit;
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
return 0;
@@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~BIT(g->oe_bit);
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (value)
@@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
val |= BIT(g->oe_bit);
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (value)
@@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
val &= ~BIT(g->out_bit);
writel(val, pctrl->regs + g->io_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
#ifdef CONFIG_DEBUG_FS
@@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_cfg_reg);
val &= ~BIT(g->intr_enable_bit);
@@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
clear_bit(d->hwirq, pctrl->enabled_irqs);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void msm_gpio_irq_unmask(struct irq_data *d)
@@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
val &= ~BIT(g->intr_status_bit);
@@ -604,7 +604,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
set_bit(d->hwirq, pctrl->enabled_irqs);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void msm_gpio_irq_ack(struct irq_data *d)
@@ -617,7 +617,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
if (g->intr_ack_high)
@@ -629,7 +629,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -642,7 +642,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/*
* For hw without possibility of detecting both edges
@@ -716,7 +716,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
@@ -732,11 +732,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -882,7 +882,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
pctrl->soc = soc_data;
pctrl->chip = msm_gpio_template;
- spin_lock_init(&pctrl->lock);
+ raw_spin_lock_init(&pctrl->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
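
The msm conversion from spinlock_t to raw_spinlock_t matters because the lock is taken in the irqchip callbacks (mask, unmask, ack, set_type), which run in hard interrupt context; on PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock and may not be taken there. The access pattern the conversion preserves looks like the sketch below (example_rmw and its arguments are illustrative):

/* Read-modify-write of a pin register under the raw lock. */
static void example_rmw(struct msm_pinctrl *pctrl, u32 offset, u32 set_bits)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pctrl->lock, flags);
	val = readl(pctrl->regs + offset);
	writel(val | set_bits, pctrl->regs + offset);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}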
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 5591d093bf78..bb71dd1e6279 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -193,9 +193,9 @@ static const struct pinctrl_pin_desc msm8660_pins[] = {
PINCTRL_PIN(171, "GPIO_171"),
PINCTRL_PIN(172, "GPIO_172"),
- PINCTRL_PIN(173, "SDC1_CLK"),
- PINCTRL_PIN(174, "SDC1_CMD"),
- PINCTRL_PIN(175, "SDC1_DATA"),
+ PINCTRL_PIN(173, "SDC4_CLK"),
+ PINCTRL_PIN(174, "SDC4_CMD"),
+ PINCTRL_PIN(175, "SDC4_DATA"),
PINCTRL_PIN(176, "SDC3_CLK"),
PINCTRL_PIN(177, "SDC3_CMD"),
PINCTRL_PIN(178, "SDC3_DATA"),
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 12f7d1eb65bc..f9b49967f512 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -24,11 +24,15 @@
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/err.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -56,6 +60,17 @@ static const struct samsung_pin_bank_type bank_type_alive = {
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
};
+/* Exynos5433 has 4-bit wide PINCFG_TYPE_DRV bitfields. */
+static const struct samsung_pin_bank_type exynos5433_bank_type_off = {
+ .fld_width = { 4, 1, 2, 4, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
+ .fld_width = { 4, 1, 2, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
static void exynos_irq_mask(struct irq_data *irqd)
{
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -517,10 +532,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kzalloc(dev, bank->nr_pins
* sizeof(*weint_data), GFP_KERNEL);
- if (!weint_data) {
- dev_err(dev, "could not allocate memory for weint_data\n");
+ if (!weint_data)
return -ENOMEM;
- }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(bank->of_node, idx);
@@ -548,10 +561,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
muxed_data = devm_kzalloc(dev, sizeof(*muxed_data)
+ muxed_banks*sizeof(struct samsung_pin_bank *), GFP_KERNEL);
- if (!muxed_data) {
- dev_err(dev, "could not allocate memory for muxed_data\n");
+ if (!muxed_data)
return -ENOMEM;
- }
irq_set_chained_handler_and_data(irq, exynos_irq_demux_eint16_31,
muxed_data);
@@ -633,6 +644,60 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
exynos_pinctrl_resume_bank(drvdata, bank);
}
+/* Retention control for S5PV210 is located at the end of the clock controller */
+#define S5P_OTHERS 0xE000
+
+#define S5P_OTHERS_RET_IO (1 << 31)
+#define S5P_OTHERS_RET_CF (1 << 30)
+#define S5P_OTHERS_RET_MMC (1 << 29)
+#define S5P_OTHERS_RET_UART (1 << 28)
+
+static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ void *clk_base = drvdata->retention_ctrl->priv;
+ u32 tmp;
+
+ tmp = __raw_readl(clk_base + S5P_OTHERS);
+ tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF | S5P_OTHERS_RET_MMC |
+ S5P_OTHERS_RET_UART);
+ __raw_writel(tmp, clk_base + S5P_OTHERS);
+}
+
+static struct samsung_retention_ctrl *
+s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ const struct samsung_retention_data *data)
+{
+ struct samsung_retention_ctrl *ctrl;
+ struct device_node *np;
+ void *clk_base;
+
+ ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl->priv = clk_base;
+ ctrl->disable = s5pv210_retention_disable;
+
+ return ctrl;
+}
+
+static const struct samsung_retention_data s5pv210_retention_data __initconst = {
+ .init = s5pv210_retention_init,
+};
+
/* pin banks of s5pv210 pin-controller */
static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -680,9 +745,58 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &s5pv210_retention_data,
},
};
+/* Pad retention control code for accessing PMU regmap */
+static atomic_t exynos_shared_retention_refcnt;
+
+static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ if (drvdata->retention_ctrl->refcnt)
+ atomic_inc(drvdata->retention_ctrl->refcnt);
+}
+
+static void exynos_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct samsung_retention_ctrl *ctrl = drvdata->retention_ctrl;
+ struct regmap *pmu_regs = ctrl->priv;
+ int i;
+
+ if (ctrl->refcnt && !atomic_dec_and_test(ctrl->refcnt))
+ return;
+
+ for (i = 0; i < ctrl->nr_regs; i++)
+ regmap_write(pmu_regs, ctrl->regs[i], ctrl->value);
+}
+
+static struct samsung_retention_ctrl *
+exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ const struct samsung_retention_data *data)
+{
+ struct samsung_retention_ctrl *ctrl;
+ struct regmap *pmu_regs;
+
+ ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ pmu_regs = exynos_get_pmu_regmap();
+ if (IS_ERR(pmu_regs))
+ return ERR_CAST(pmu_regs);
+
+ ctrl->priv = pmu_regs;
+ ctrl->regs = data->regs;
+ ctrl->nr_regs = data->nr_regs;
+ ctrl->value = data->value;
+ ctrl->refcnt = data->refcnt;
+ ctrl->enable = exynos_retention_enable;
+ ctrl->disable = exynos_retention_disable;
+
+ return ctrl;
+}
+
/* pin banks of exynos3250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -715,6 +829,30 @@ static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst =
};
/*
+ * PMU pad retention groups for Exynos3250 don't match the pin banks, so handle
+ * them all together.
+ */
+static const u32 exynos3250_retention_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+ S5P_PAD_RET_MMC2_OPTION,
+ S5P_PAD_RET_SPI_OPTION,
+};
+
+static const struct samsung_retention_data exynos3250_retention_data __initconst = {
+ .regs = exynos3250_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos3250_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/*
* Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
* two gpio/pin-mux/pinconfig controllers.
*/
@@ -726,6 +864,7 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos3250_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos3250_pin_banks1,
@@ -734,6 +873,7 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos3250_retention_data,
},
};
@@ -786,6 +926,36 @@ static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst =
EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
};
+/* PMU pad retention group registers for Exynos4 (without audio) */
+static const u32 exynos4_retention_regs[] = {
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+};
+
+static const struct samsung_retention_data exynos4_retention_data __initconst = {
+ .regs = exynos4_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos4_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/* PMU retention control for audio pins can be tied to audio pin bank */
+static const u32 exynos4_audio_retention_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+};
+
+static const struct samsung_retention_data exynos4_audio_retention_data __initconst = {
+ .regs = exynos4_audio_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos4_audio_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .init = exynos_retention_init,
+};
+
/*
* Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
* three gpio/pin-mux/pinconfig controllers.
@@ -798,6 +968,7 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos4210_pin_banks1,
@@ -806,10 +977,12 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos4210_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos4210_pin_banks2),
+ .retention_data = &exynos4_audio_retention_data,
},
};
@@ -883,6 +1056,7 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos4x12_pin_banks1,
@@ -891,6 +1065,7 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos4x12_pin_banks2,
@@ -898,6 +1073,7 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_audio_retention_data,
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos4x12_pin_banks3,
@@ -908,81 +1084,6 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
},
};
-/* pin banks of exynos4415 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos4415_pin_banks0[] = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
- EXYNOS_PIN_BANK_EINTG(1, 0x1C0, "gpf2", 0x38),
-};
-
-/* pin banks of exynos4415 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos4415_pin_banks1[] = {
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpk0", 0x08),
- EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
- EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpl0", 0x18),
- EXYNOS_PIN_BANK_EINTN(6, 0x120, "mp00"),
- EXYNOS_PIN_BANK_EINTN(4, 0x140, "mp01"),
- EXYNOS_PIN_BANK_EINTN(6, 0x160, "mp02"),
- EXYNOS_PIN_BANK_EINTN(8, 0x180, "mp03"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "mp04"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "mp05"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "mp06"),
- EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
- EXYNOS_PIN_BANK_EINTG(5, 0x2A0, "gpm2", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x2C0, "gpm3", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x2E0, "gpm4", 0x34),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos4415 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos4415_pin_banks2[] = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
- EXYNOS_PIN_BANK_EINTN(2, 0x000, "etc1"),
-};
-
-/*
- * Samsung pinctrl driver data for Exynos4415 SoC. Exynos4415 SoC includes
- * three gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos4415_pin_ctrl[] = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos4415_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos4415_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos4415_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos4415_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos4415_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos4415_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- },
-};
-
/* pin banks of exynos5250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -1052,6 +1153,7 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5250_pin_banks1,
@@ -1059,6 +1161,7 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5250_pin_banks2,
@@ -1073,6 +1176,7 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_audio_retention_data,
},
};
@@ -1299,6 +1403,30 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst =
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
+/* PMU pad retention group registers for Exynos5420 (without audio) */
+static const u32 exynos5420_retention_regs[] = {
+ EXYNOS_PAD_RET_DRAM_OPTION,
+ EXYNOS_PAD_RET_JTAG_OPTION,
+ EXYNOS5420_PAD_RET_GPIO_OPTION,
+ EXYNOS5420_PAD_RET_UART_OPTION,
+ EXYNOS5420_PAD_RET_MMCA_OPTION,
+ EXYNOS5420_PAD_RET_MMCB_OPTION,
+ EXYNOS5420_PAD_RET_MMCC_OPTION,
+ EXYNOS5420_PAD_RET_HSI_OPTION,
+ EXYNOS_PAD_RET_EBIA_OPTION,
+ EXYNOS_PAD_RET_EBIB_OPTION,
+ EXYNOS5420_PAD_RET_SPI_OPTION,
+ EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION,
+};
+
+static const struct samsung_retention_data exynos5420_retention_data __initconst = {
+ .regs = exynos5420_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos5420_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
/*
* Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
* four gpio/pin-mux/pinconfig controllers.
@@ -1310,31 +1438,36 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 1 data */
.pin_banks = exynos5420_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5420_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5420_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
}, {
/* pin-controller instance 4 data */
.pin_banks = exynos5420_pin_banks4,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos4_audio_retention_data,
},
};
/* pin banks of exynos5433 pin-controller - ALIVE */
-static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
@@ -1347,28 +1480,28 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
};
/* pin banks of exynos5433 pin-controller - AUD */
-static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
-static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
-static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
-static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
-static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
@@ -1378,17 +1511,17 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
};
/* pin banks of exynos5433 pin-controller - IMEM */
-static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
-static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
-static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
@@ -1409,7 +1542,7 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
};
/* pin banks of exynos5433 pin-controller - TOUCH */
-static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
+static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
@@ -1417,7 +1550,7 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
* Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
* ten gpio/pin-mux/pinconfig controllers.
*/
-const struct samsung_pin_ctrl exynos5433_pin_ctrl[] = {
+const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 data */
.pin_banks = exynos5433_pin_banks0,
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 5821525a2c84..a473092fb8d2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -90,6 +90,37 @@
.pctl_res_idx = pctl_idx, \
} \
+#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
+ { \
+ .type = &exynos5433_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
+#define EXYNOS5433_PIN_BANK_EINTW(pins, reg, id, offs) \
+ { \
+ .type = &exynos5433_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
+#define EXYNOS5433_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
+ { \
+ .type = &exynos5433_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_offset = offs, \
+ .name = id, \
+ .pctl_res_idx = pctl_idx, \
+ } \
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
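
The EXYNOS5433_* bank macros are the same shape as the existing EXYNOS_PIN_BANK_* ones but reference the new exynos5433_bank_type_off/_alive tables, so Exynos5433 banks pick up the 4-bit PINCFG_TYPE_DRV fields. For illustration, EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00) expands to a samsung_pin_bank_data initializer of the form:

{
	.type		= &exynos5433_bank_type_off,
	.pctl_offset	= 0x000,
	.nr_pins	= 7,
	.eint_type	= EINT_TYPE_GPIO,
	.eint_offset	= 0x00,
	.name		= "gpz0"
},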
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 4c632812ccff..f17890aa6e25 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -489,10 +489,8 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
data = devm_kzalloc(dev, sizeof(*data)
+ nr_domains * sizeof(*data->domains), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "failed to allocate handler data\n");
+ if (!data)
return -ENOMEM;
- }
data->drvdata = d;
bank = d->pin_banks;
@@ -715,10 +713,8 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "could not allocate memory for wkup eint data\n");
+ if (!data)
return -ENOMEM;
- }
data->drvdata = d;
for (i = 0; i < NUM_EINT0_IRQ; ++i) {
@@ -751,10 +747,8 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
ddata = devm_kzalloc(dev,
sizeof(*ddata) + nr_eints, GFP_KERNEL);
- if (!ddata) {
- dev_err(dev, "failed to allocate domain data\n");
+ if (!ddata)
return -ENOMEM;
- }
ddata->bank = bank;
bank->irq_domain = irq_domain_add_linear(bank->of_node,
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 41e62391c33c..d79eadad6c5f 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -27,8 +27,8 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/of_device.h>
#include <linux/spinlock.h>
-#include <linux/syscore_ops.h>
#include "../core.h"
#include "pinctrl-samsung.h"
@@ -48,9 +48,6 @@ static struct pin_config {
{ "samsung,pin-val", PINCFG_TYPE_DAT },
};
-/* Global list of devices (struct samsung_pinctrl_drv_data) */
-static LIST_HEAD(drvdata_list);
-
static unsigned int pin_base;
static int samsung_get_group_count(struct pinctrl_dev *pctldev)
@@ -93,10 +90,8 @@ static int reserve_map(struct device *dev, struct pinctrl_map **map,
return 0;
new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
- if (!new_map) {
- dev_err(dev, "krealloc(map) failed\n");
+ if (!new_map)
return -ENOMEM;
- }
memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
@@ -133,10 +128,8 @@ static int add_map_configs(struct device *dev, struct pinctrl_map **map,
dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
GFP_KERNEL);
- if (!dup_configs) {
- dev_err(dev, "kmemdup(configs) failed\n");
+ if (!dup_configs)
return -ENOMEM;
- }
(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
(*map)[*num_maps].data.configs.group_or_pin = group;
@@ -156,10 +149,8 @@ static int add_config(struct device *dev, unsigned long **configs,
new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
GFP_KERNEL);
- if (!new_configs) {
- dev_err(dev, "krealloc(configs) failed\n");
+ if (!new_configs)
return -ENOMEM;
- }
new_configs[old_num] = config;
@@ -356,7 +347,7 @@ static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
/* enable or disable a pinmux function */
static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group, bool enable)
+ unsigned group)
{
struct samsung_pinctrl_drv_data *drvdata;
const struct samsung_pin_bank_type *type;
@@ -386,8 +377,7 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
data &= ~(mask << shift);
- if (enable)
- data |= func->val << shift;
+ data |= func->val << shift;
writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
spin_unlock_irqrestore(&bank->slock, flags);
@@ -398,7 +388,7 @@ static int samsung_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned selector,
unsigned group)
{
- samsung_pinmux_setup(pctldev, selector, group, true);
+ samsung_pinmux_setup(pctldev, selector, group);
return 0;
}
@@ -756,10 +746,8 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
functions = devm_kzalloc(dev, func_cnt * sizeof(*functions),
GFP_KERNEL);
- if (!functions) {
- dev_err(dev, "failed to allocate memory for function list\n");
- return ERR_PTR(-EINVAL);
- }
+ if (!functions)
+ return ERR_PTR(-ENOMEM);
func = functions;
/*
@@ -850,10 +838,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
drvdata->nr_pins, GFP_KERNEL);
- if (!pindesc) {
- dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+ if (!pindesc)
return -ENOMEM;
- }
ctrldesc->pins = pindesc;
ctrldesc->npins = drvdata->nr_pins;
@@ -867,10 +853,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
*/
pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
drvdata->nr_pins, GFP_KERNEL);
- if (!pin_names) {
- dev_err(&pdev->dev, "mem alloc for pin names failed\n");
+ if (!pin_names)
return -ENOMEM;
- }
/* for each pin, the name of the pin is pin-bank name + pin number */
for (bank = 0; bank < drvdata->nr_banks; bank++) {
@@ -968,15 +952,12 @@ static int samsung_gpiolib_unregister(struct platform_device *pdev,
return 0;
}
-static const struct of_device_id samsung_pinctrl_dt_match[];
-
/* retrieve the soc specific data */
static const struct samsung_pin_ctrl *
samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
struct platform_device *pdev)
{
int id;
- const struct of_device_id *match;
struct device_node *node = pdev->dev.of_node;
struct device_node *np;
const struct samsung_pin_bank_data *bdata;
@@ -991,8 +972,8 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
dev_err(&pdev->dev, "failed to get alias id\n");
return ERR_PTR(-ENOENT);
}
- match = of_match_node(samsung_pinctrl_dt_match, node);
- ctrl = (struct samsung_pin_ctrl *)match->data + id;
+ ctrl = of_device_get_match_data(&pdev->dev);
+ ctrl += id;
d->suspend = ctrl->suspend;
d->resume = ctrl->resume;
@@ -1075,6 +1056,13 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (res)
drvdata->irq = res->start;
+ if (ctrl->retention_data) {
+ drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
+ ctrl->retention_data);
+ if (IS_ERR(drvdata->retention_ctrl))
+ return PTR_ERR(drvdata->retention_ctrl);
+ }
+
ret = samsung_gpiolib_register(pdev, drvdata);
if (ret)
return ret;
@@ -1092,22 +1080,18 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
- /* Add to the global list */
- list_add_tail(&drvdata->node, &drvdata_list);
-
return 0;
}
#ifdef CONFIG_PM
-
/**
- * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device
+ * samsung_pinctrl_suspend - save pinctrl state for suspend
*
* Save data for all banks handled by this device.
*/
-static void samsung_pinctrl_suspend_dev(
- struct samsung_pinctrl_drv_data *drvdata)
+static int samsung_pinctrl_suspend(struct device *dev)
{
+ struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
int i;
for (i = 0; i < drvdata->nr_banks; i++) {
@@ -1141,18 +1125,23 @@ static void samsung_pinctrl_suspend_dev(
if (drvdata->suspend)
drvdata->suspend(drvdata);
+ if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable)
+ drvdata->retention_ctrl->enable(drvdata);
+
+ return 0;
}
/**
- * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device
+ * samsung_pinctrl_resume - restore pinctrl state from suspend
*
* Restore one of the banks that was saved during suspend.
*
* We don't bother doing anything complicated to avoid glitching lines since
* we're called before pad retention is turned off.
*/
-static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
+static int samsung_pinctrl_resume(struct device *dev)
{
+ struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
int i;
if (drvdata->resume)
@@ -1188,48 +1177,14 @@ static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
if (widths[type])
writel(bank->pm_save[type], reg + offs[type]);
}
-}
-/**
- * samsung_pinctrl_suspend - save pinctrl state for suspend
- *
- * Save data for all banks across all devices.
- */
-static int samsung_pinctrl_suspend(void)
-{
- struct samsung_pinctrl_drv_data *drvdata;
-
- list_for_each_entry(drvdata, &drvdata_list, node) {
- samsung_pinctrl_suspend_dev(drvdata);
- }
+ if (drvdata->retention_ctrl && drvdata->retention_ctrl->disable)
+ drvdata->retention_ctrl->disable(drvdata);
return 0;
}
-
-/**
- * samsung_pinctrl_resume - restore pinctrl state for suspend
- *
- * Restore data for all banks across all devices.
- */
-static void samsung_pinctrl_resume(void)
-{
- struct samsung_pinctrl_drv_data *drvdata;
-
- list_for_each_entry_reverse(drvdata, &drvdata_list, node) {
- samsung_pinctrl_resume_dev(drvdata);
- }
-}
-
-#else
-#define samsung_pinctrl_suspend NULL
-#define samsung_pinctrl_resume NULL
#endif
-static struct syscore_ops samsung_pinctrl_syscore_ops = {
- .suspend = samsung_pinctrl_suspend,
- .resume = samsung_pinctrl_resume,
-};
-
static const struct of_device_id samsung_pinctrl_dt_match[] = {
#ifdef CONFIG_PINCTRL_EXYNOS
{ .compatible = "samsung,exynos3250-pinctrl",
@@ -1238,8 +1193,6 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos4210_pin_ctrl },
{ .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
- { .compatible = "samsung,exynos4415-pinctrl",
- .data = (void *)exynos4415_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
.data = (void *)exynos5250_pin_ctrl },
{ .compatible = "samsung,exynos5260-pinctrl",
@@ -1273,25 +1226,23 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match);
+static const struct dev_pm_ops samsung_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(samsung_pinctrl_suspend,
+ samsung_pinctrl_resume)
+};
+
static struct platform_driver samsung_pinctrl_driver = {
.probe = samsung_pinctrl_probe,
.driver = {
.name = "samsung-pinctrl",
.of_match_table = samsung_pinctrl_dt_match,
.suppress_bind_attrs = true,
+ .pm = &samsung_pinctrl_pm_ops,
},
};
static int __init samsung_pinctrl_drv_register(void)
{
- /*
- * Register syscore ops for save/restore of registers across suspend.
- * It's important to ensure that this driver is running at an earlier
- * initcall level than any arch-specific init calls that install syscore
- * ops that turn off pad retention (like exynos_pm_resume).
- */
- register_syscore_ops(&samsung_pinctrl_syscore_ops);
-
return platform_driver_register(&samsung_pinctrl_driver);
}
postcore_initcall(samsung_pinctrl_drv_register);
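
Replacing syscore_ops with dev_pm_ops ties save/restore to each pinctrl device instead of a global list, and the late system-sleep phase keeps the old ordering guarantee: state is saved after ordinary device suspend and restored before pad retention is released. For any platform driver the equivalent boilerplate looks roughly like this (example_* names are placeholders):

static int example_suspend_late(struct device *dev)
{
	/* struct example_drvdata *d = dev_get_drvdata(dev); save registers */
	return 0;
}

static int example_resume_late(struct device *dev)
{
	/* restore the registers saved above */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend_late, example_resume_late)
};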
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 043cb6c11180..515a61035e54 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -185,10 +185,48 @@ struct samsung_pin_bank {
};
/**
+ * struct samsung_retention_ctrl: runtime pin-bank retention control data.
+ * @regs: array of PMU registers to control pad retention.
+ * @nr_regs: number of registers in @regs array.
+ * @value: value to store to registers to turn off retention.
+ * @refcnt: atomic counter if retention control affects more than one bank.
+ * @priv: retention control code private data
+ * @enable: platform specific callback to enter retention mode.
+ * @disable: platform specific callback to exit retention mode.
+ **/
+struct samsung_retention_ctrl {
+ const u32 *regs;
+ int nr_regs;
+ u32 value;
+ atomic_t *refcnt;
+ void *priv;
+ void (*enable)(struct samsung_pinctrl_drv_data *);
+ void (*disable)(struct samsung_pinctrl_drv_data *);
+};
+
+/**
+ * struct samsung_retention_data: represent pin-bank retention control data.
+ * @regs: array of PMU registers to control pad retention.
+ * @nr_regs: number of registers in @regs array.
+ * @value: value to store to registers to turn off retention.
+ * @refcnt: atomic counter if retention control affects more than one bank.
+ * @init: platform specific callback to initialize retention control.
+ **/
+struct samsung_retention_data {
+ const u32 *regs;
+ int nr_regs;
+ u32 value;
+ atomic_t *refcnt;
+ struct samsung_retention_ctrl *(*init)(struct samsung_pinctrl_drv_data *,
+ const struct samsung_retention_data *);
+};
+
+/**
* struct samsung_pin_ctrl: represent a pin controller.
* @pin_banks: list of pin banks included in this controller.
* @nr_banks: number of pin banks.
* @nr_ext_resources: number of the extra base address for pin banks.
+ * @retention_data: configuration data for retention control.
* @eint_gpio_init: platform specific callback to setup the external gpio
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
@@ -198,6 +236,7 @@ struct samsung_pin_ctrl {
const struct samsung_pin_bank_data *pin_banks;
u32 nr_banks;
int nr_ext_resources;
+ const struct samsung_retention_data *retention_data;
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
@@ -219,6 +258,7 @@ struct samsung_pin_ctrl {
* @nr_function: number of such pin functions.
* @pin_base: starting system wide pin number.
* @nr_pins: number of pins supported by the controller.
+ * @retention_ctrl: retention control runtime data.
*/
struct samsung_pinctrl_drv_data {
struct list_head node;
@@ -238,6 +278,8 @@ struct samsung_pinctrl_drv_data {
unsigned int pin_base;
unsigned int nr_pins;
+ struct samsung_retention_ctrl *retention_ctrl;
+
void (*suspend)(struct samsung_pinctrl_drv_data *);
void (*resume)(struct samsung_pinctrl_drv_data *);
};
@@ -273,7 +315,6 @@ struct samsung_pmx_func {
extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
-extern const struct samsung_pin_ctrl exynos4415_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[];
extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[];
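
The retention support splits into static per-SoC samsung_retention_data (register list plus an init() hook) and a runtime samsung_retention_ctrl built once at probe, whose enable()/disable() callbacks are then invoked around system sleep. A hedged sketch of what a SoC entry supplies, modelled on the Exynos tables above (the example_ names and the single register chosen are illustrative):

static const u32 example_retention_regs[] = {
	S5P_PAD_RET_GPIO_OPTION,	/* PMU pad-retention control register */
};

static const struct samsung_retention_data example_retention_data __initconst = {
	.regs		= example_retention_regs,
	.nr_regs	= ARRAY_SIZE(example_retention_regs),
	.value		= EXYNOS_WAKEUP_FROM_LOWPWR,
	.refcnt		= &exynos_shared_retention_refcnt,
	.init		= exynos_retention_init,
};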
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index fcacfa73ef6e..08150a321be6 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,6 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
pmx->pctl_desc.pins = pmx->pins;
pmx->pctl_desc.npins = pfc->info->nr_pins;
- pmx->pctl = devm_pinctrl_register(pfc->dev, &pmx->pctl_desc, pmx);
- return PTR_ERR_OR_ZERO(pmx->pctl);
+ return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
+ &pmx->pctl);
}
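
sh-pfc switches from devm_pinctrl_register(), which returns a pinctrl_dev or an ERR_PTR, to devm_pinctrl_register_and_init(), which reports failure through its return value and fills in the device via an out-parameter. Assuming dev, desc, drvdata and pctldev are already set up, the calling convention is simply:

/* Init-style registration: no IS_ERR()/PTR_ERR() conversion needed. */
ret = devm_pinctrl_register_and_init(dev, &desc, drvdata, &pctldev);
if (ret)
	return ret;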
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 7f3041697813..600d6427a978 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5322,7 +5322,8 @@ static int atlas7_pin_config_set(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long *configs,
unsigned num_configs)
{
- u16 param, arg;
+ u16 param;
+ u32 arg;
int idx, err;
for (idx = 0; idx < num_configs; idx++) {
@@ -5420,14 +5421,15 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
if (!sys2pci_np)
return -EINVAL;
+
ret = of_address_to_resource(sys2pci_np, 0, &res);
+ of_node_put(sys2pci_np);
if (ret)
return ret;
+
pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(pmx->sys2pci_base)) {
- of_node_put(sys2pci_np);
+ if (IS_ERR(pmx->sys2pci_base))
return -ENOMEM;
- }
pmx->dev = &pdev->dev;
@@ -5443,7 +5445,7 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
pmx->regs[idx] = of_iomap(np, idx);
if (!pmx->regs[idx]) {
dev_err(&pdev->dev,
- "can't map ioc bank#%d registers\n", idx);
+ "can't map ioc bank#%d registers\n", idx);
ret = -ENOMEM;
goto unmap_io;
}
@@ -6056,8 +6058,8 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(chip, a7gc);
if (ret) {
dev_err(&pdev->dev,
- "%s: error in probe function with status %d\n",
- np->name, ret);
+ "%s: error in probe function with status %d\n",
+ np->name, ret);
goto failed;
}
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index efc43711ff5c..c983a1e33dbe 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -631,6 +631,7 @@ static const struct pinmux_ops stm32_pmx_ops = {
.get_function_groups = stm32_pmx_get_func_groups,
.set_mux = stm32_pmx_set_mux,
.gpio_set_direction = stm32_pmx_gpio_set_direction,
+ .strict = true,
};
/* Pinconf functions */
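The single line added above opts the STM32 driver into strict pin muxing: with strict set, the pinmux core refuses to hand a pin to a peripheral function while that pin is claimed as a GPIO, and refuses the GPIO request when the pin is owned by a function. Below is a schematic, hypothetical sketch of where the flag sits in a pinmux_ops table; the callbacks are empty stubs kept only so the example stands on its own.

#include <linux/pinctrl/pinmux.h>

static int example_get_functions_count(struct pinctrl_dev *pctldev)
{
	return 0;	/* stub */
}

static const char *example_get_function_name(struct pinctrl_dev *pctldev,
					     unsigned int selector)
{
	return "example";	/* stub */
}

static int example_get_function_groups(struct pinctrl_dev *pctldev,
				       unsigned int selector,
				       const char * const **groups,
				       unsigned int *num_groups)
{
	*groups = NULL;		/* stub */
	*num_groups = 0;
	return 0;
}

static int example_set_mux(struct pinctrl_dev *pctldev,
			   unsigned int func_selector,
			   unsigned int group_selector)
{
	return 0;	/* stub */
}

static const struct pinmux_ops example_pmx_ops = {
	.get_functions_count	= example_get_functions_count,
	.get_function_name	= example_get_function_name,
	.get_function_groups	= example_get_function_groups,
	.set_mux		= example_set_mux,
	.strict			= true,	/* reject GPIO/function double claims on a pin */
};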
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index bff1ffc6f01e..8ba10d830ce2 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -9,18 +9,10 @@ config PINCTRL_SUN4I_A10
def_bool MACH_SUN4I
select PINCTRL_SUNXI
-config PINCTRL_SUN5I_A10S
+config PINCTRL_SUN5I
def_bool MACH_SUN5I
select PINCTRL_SUNXI
-config PINCTRL_SUN5I_A13
- def_bool MACH_SUN5I
- select PINCTRL_SUNXI
-
-config PINCTRL_GR8
- def_bool MACH_SUN5I
- select PINCTRL_SUNXI_COMMON
-
config PINCTRL_SUN6I_A31
def_bool MACH_SUN6I
select PINCTRL_SUNXI
@@ -63,6 +55,10 @@ config PINCTRL_SUN8I_H3_R
def_bool MACH_SUN8I
select PINCTRL_SUNXI_COMMON
+config PINCTRL_SUN8I_V3S
+ def_bool MACH_SUN8I
+ select PINCTRL_SUNXI
+
config PINCTRL_SUN9I_A80
def_bool MACH_SUN9I
select PINCTRL_SUNXI
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 95f93d0561fc..7bcb4683bce5 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -3,9 +3,7 @@ obj-y += pinctrl-sunxi.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_SUN4I_A10) += pinctrl-sun4i-a10.o
-obj-$(CONFIG_PINCTRL_SUN5I_A10S) += pinctrl-sun5i-a10s.o
-obj-$(CONFIG_PINCTRL_SUN5I_A13) += pinctrl-sun5i-a13.o
-obj-$(CONFIG_PINCTRL_GR8) += pinctrl-gr8.o
+obj-$(CONFIG_PINCTRL_SUN5I) += pinctrl-sun5i.o
obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o
obj-$(CONFIG_PINCTRL_SUN6I_A31S) += pinctrl-sun6i-a31s.o
obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
@@ -17,5 +15,6 @@ obj-$(CONFIG_PINCTRL_SUN50I_A64) += pinctrl-sun50i-a64.o
obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
+obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-gr8.c b/drivers/pinctrl/sunxi/pinctrl-gr8.c
deleted file mode 100644
index 2f232c3a0579..000000000000
--- a/drivers/pinctrl/sunxi/pinctrl-gr8.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * NextThing GR8 SoCs pinctrl driver.
- *
- * Copyright (C) 2016 Mylene Josserand
- *
- * Based on pinctrl-sun5i-a13.c
- *
- * Mylene Josserand <mylene.josserand@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-sunxi.h"
-
-static const struct sunxi_desc_pin sun5i_gr8_pins[] = {
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm0"),
- SUNXI_FUNCTION(0x3, "spdif"), /* DO */
- SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 17)), /* EINT17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
- SUNXI_FUNCTION_IRQ(0x6, 19)), /* EINT19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
- SUNXI_FUNCTION_IRQ(0x6, 20)), /* EINT20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
- SUNXI_FUNCTION_IRQ(0x6, 21)), /* EINT21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO */
- SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DI */
- SUNXI_FUNCTION(0x3, "spdif"), /* DI */
- SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
- SUNXI_FUNCTION(0x3, "spdif"), /* DO */
- SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
- SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
- SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
- SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
- SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
- SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
- SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
- SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
- SUNXI_FUNCTION_IRQ(0x6, 28)), /* EINT28 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE1 */
- SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NRE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ4 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ5 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ6 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQS */
- SUNXI_FUNCTION(0x3, "uart2"), /* RX */
- SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
- SUNXI_FUNCTION(0x3, "uart2")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
- SUNXI_FUNCTION(0x3, "uart2")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
- SUNXI_FUNCTION(0x3, "uart2")), /* CTS */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
- SUNXI_FUNCTION(0x3, "uart2")), /* RTS */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
- SUNXI_FUNCTION(0x3, "emac")), /* ECRS */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
- SUNXI_FUNCTION(0x3, "emac")), /* ECOL */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXD3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXERR */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
- SUNXI_FUNCTION(0x3, "emac")), /* ERXDV */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXD3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXEN */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
- SUNXI_FUNCTION(0x3, "emac")), /* ETXERR*/
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "emac")), /* EMDC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "emac")), /* EMDIO */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "ts0"), /* CLK */
- SUNXI_FUNCTION(0x3, "csi0"), /* PCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CS0 */
- SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "ts0"), /* ERR */
- SUNXI_FUNCTION(0x3, "csi0"), /* MCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "ts0"), /* SYNC */
- SUNXI_FUNCTION(0x3, "csi0"), /* HSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* DVLD */
- SUNXI_FUNCTION(0x3, "csi0"), /* VSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D0 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D0 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D1 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D2 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D2 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D3 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D3 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D4 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D4 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D5 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D5 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D6 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart1")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D7 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RX */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
- SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
- SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart0")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
- SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart0")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
- SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "gps"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "gps"), /* SIGN */
- SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x2, "gps"), /* MAG */
- SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
- SUNXI_FUNCTION(0x3, "ms"), /* BS */
- SUNXI_FUNCTION(0x4, "uart1"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 3)), /* EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
- SUNXI_FUNCTION(0x3, "ms"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart1"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
- SUNXI_FUNCTION(0x3, "ms"), /* D0 */
- SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 5)), /* EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
- SUNXI_FUNCTION(0x3, "ms"), /* D1 */
- SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
- SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 6)), /* EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
- SUNXI_FUNCTION(0x3, "ms"), /* D2 */
- SUNXI_FUNCTION(0x5, "uart2"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 7)), /* EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
- SUNXI_FUNCTION(0x3, "ms"), /* D3 */
- SUNXI_FUNCTION(0x5, "uart2"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 8)), /* EINT8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "uart3"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 9)), /* EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "uart3"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 10)), /* EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 11)), /* EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "pwm1"),
- SUNXI_FUNCTION(0x5, "uart2"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
-};
-
-static const struct sunxi_pinctrl_desc sun5i_gr8_pinctrl_data = {
- .pins = sun5i_gr8_pins,
- .npins = ARRAY_SIZE(sun5i_gr8_pins),
- .irq_banks = 1,
-};
-
-static int sun5i_gr8_pinctrl_probe(struct platform_device *pdev)
-{
- return sunxi_pinctrl_init(pdev,
- &sun5i_gr8_pinctrl_data);
-}
-
-static const struct of_device_id sun5i_gr8_pinctrl_match[] = {
- { .compatible = "nextthing,gr8-pinctrl", },
- {}
-};
-
-static struct platform_driver sun5i_gr8_pinctrl_driver = {
- .probe = sun5i_gr8_pinctrl_probe,
- .driver = {
- .name = "gr8-pinctrl",
- .of_match_table = sun5i_gr8_pinctrl_match,
- },
-};
-builtin_platform_driver(sun5i_gr8_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
deleted file mode 100644
index 8575f3f6d3dd..000000000000
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Allwinner A13 SoCs pinctrl driver.
- *
- * Copyright (C) 2014 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-sunxi.h"
-
-static const struct sunxi_desc_pin sun5i_a13_pins[] = {
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm"),
- SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 17)), /* EINT17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
- SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE1 */
- SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NRE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ4 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ5 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ6 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQS */
- SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D15 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x3, "csi0"), /* PCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CS0 */
- SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x3, "csi0"), /* MCLK */
- SUNXI_FUNCTION(0x4, "spi2"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x3, "csi0"), /* HSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* VSYNC */
- SUNXI_FUNCTION(0x4, "spi2")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D0 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D2 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D3 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D4 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D5 */
- SUNXI_FUNCTION(0x4, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart1")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "csi0"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RX */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0")), /* D2 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
- SUNXI_FUNCTION(0x4, "uart1"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 3)), /* EINT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart1"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "uart3"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 9)), /* EINT9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "uart3"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 10)), /* EINT10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 11)), /* EINT11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
-};
-
-static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
- .pins = sun5i_a13_pins,
- .npins = ARRAY_SIZE(sun5i_a13_pins),
- .irq_banks = 1,
-};
-
-static int sun5i_a13_pinctrl_probe(struct platform_device *pdev)
-{
- return sunxi_pinctrl_init(pdev,
- &sun5i_a13_pinctrl_data);
-}
-
-static const struct of_device_id sun5i_a13_pinctrl_match[] = {
- { .compatible = "allwinner,sun5i-a13-pinctrl", },
- {}
-};
-
-static struct platform_driver sun5i_a13_pinctrl_driver = {
- .probe = sun5i_a13_pinctrl_probe,
- .driver = {
- .name = "sun5i-a13-pinctrl",
- .of_match_table = sun5i_a13_pinctrl_match,
- },
-};
-builtin_platform_driver(sun5i_a13_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
index a5b57fdff9e1..c8a94323ce8b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
@@ -1,16 +1,15 @@
/*
- * Allwinner A10s SoCs pinctrl driver.
+ * Allwinner sun5i SoCs pinctrl driver.
*
- * Copyright (C) 2014 Maxime Ripard
+ * Copyright (C) 2014-2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2016 Mylene Josserand <mylene.josserand@free-electrons.com>
*
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
+ * This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -18,115 +17,133 @@
#include "pinctrl-sunxi.h"
-static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+static const struct sunxi_desc_pin sun5i_pins[] = {
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 0),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD3 */
SUNXI_FUNCTION(0x3, "ts0"), /* CLK */
SUNXI_FUNCTION(0x5, "keypad")), /* IN0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 1),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD2 */
SUNXI_FUNCTION(0x3, "ts0"), /* ERR */
SUNXI_FUNCTION(0x5, "keypad")), /* IN1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 2),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD1 */
SUNXI_FUNCTION(0x3, "ts0"), /* SYNC */
SUNXI_FUNCTION(0x5, "keypad")), /* IN2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 3),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD0 */
SUNXI_FUNCTION(0x3, "ts0"), /* DLVD */
SUNXI_FUNCTION(0x5, "keypad")), /* IN3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 4),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD3 */
SUNXI_FUNCTION(0x3, "ts0"), /* D0 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 5),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD2 */
SUNXI_FUNCTION(0x3, "ts0"), /* D1 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 6),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD1 */
SUNXI_FUNCTION(0x3, "ts0"), /* D2 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 7),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD0 */
SUNXI_FUNCTION(0x3, "ts0"), /* D3 */
SUNXI_FUNCTION(0x5, "keypad")), /* IN7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 8),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXCK */
SUNXI_FUNCTION(0x3, "ts0"), /* D4 */
SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 9),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXERR */
SUNXI_FUNCTION(0x3, "ts0"), /* D5 */
SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 10),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXDV */
SUNXI_FUNCTION(0x3, "ts0"), /* D6 */
SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 11),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* EMDC */
SUNXI_FUNCTION(0x3, "ts0"), /* D7 */
SUNXI_FUNCTION(0x4, "uart1"), /* RING */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 12),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* EMDIO */
SUNXI_FUNCTION(0x3, "uart1"), /* TX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 13),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXEN */
SUNXI_FUNCTION(0x3, "uart1"), /* RX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 14),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXCK */
SUNXI_FUNCTION(0x3, "uart1"), /* CTS */
SUNXI_FUNCTION(0x4, "uart3"), /* TX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 15),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ECRS */
SUNXI_FUNCTION(0x3, "uart1"), /* RTS */
SUNXI_FUNCTION(0x4, "uart3"), /* RX */
SUNXI_FUNCTION(0x5, "keypad")), /* OUT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 16),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ECOL */
SUNXI_FUNCTION(0x3, "uart2")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(A, 17),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXERR */
@@ -145,6 +162,9 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "pwm"), /* PWM0 */
+ SUNXI_FUNCTION_VARIANT(0x3,
+ "spdif", /* DO */
+ PINCTRL_SUN5I_GR8),
SUNXI_FUNCTION_IRQ(0x6, 16)), /* EINT16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -156,55 +176,70 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "ir0"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 18)), /* EINT18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 5),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* MCLK */
SUNXI_FUNCTION_IRQ(0x6, 19)), /* EINT19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 6),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* BCLK */
SUNXI_FUNCTION_IRQ(0x6, 20)), /* EINT20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 7),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* LRCK */
SUNXI_FUNCTION_IRQ(0x6, 21)), /* EINT21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 8),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* DO */
SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 9),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s"), /* DI */
+ SUNXI_FUNCTION_VARIANT(0x3,
+ "spdif", /* DI */
+ PINCTRL_SUN5I_GR8),
SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
+ SUNXI_FUNCTION_VARIANT(0x3,
+ "spdif", /* DO */
+ PINCTRL_SUN5I_GR8),
SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 11),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 12),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 13),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 14),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
@@ -226,12 +261,14 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 19),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 19),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart0"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 29)), /* EINT29 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 20),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(B, 20),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart0"), /* RX */
@@ -251,7 +288,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* SCK */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -315,17 +352,20 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 16),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NWP */
SUNXI_FUNCTION(0x4, "uart3")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 17),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE2 */
SUNXI_FUNCTION(0x4, "uart3")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(C, 18),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE3 */
@@ -338,11 +378,13 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x3, "uart2"), /* RX */
SUNXI_FUNCTION(0x4, "uart3")), /* RTS */
/* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 0),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 1),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D1 */
@@ -376,11 +418,13 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
SUNXI_FUNCTION(0x3, "emac")), /* ECOL */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 8),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 9),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D9 */
@@ -414,11 +458,13 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
SUNXI_FUNCTION(0x3, "emac")), /* ERXERR */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 16),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 17),
+ PINCTRL_SUN5I_A10S,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd0")), /* D17 */
@@ -600,26 +646,30 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
SUNXI_FUNCTION(0x4, "uart1"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 4)), /* EINT4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 5),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* DO */
SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
SUNXI_FUNCTION_IRQ(0x6, 5)), /* EINT5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 6),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
SUNXI_FUNCTION(0x5, "uart2"), /* RTS */
SUNXI_FUNCTION_IRQ(0x6, 6)), /* EINT6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 7),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
SUNXI_FUNCTION(0x5, "uart2"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 7)), /* EINT7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 8),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
@@ -649,7 +699,8 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(G, 13),
+ PINCTRL_SUN5I_A10S | PINCTRL_SUN5I_GR8,
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
@@ -658,28 +709,48 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
};
-static const struct sunxi_pinctrl_desc sun5i_a10s_pinctrl_data = {
- .pins = sun5i_a10s_pins,
- .npins = ARRAY_SIZE(sun5i_a10s_pins),
+static const struct sunxi_pinctrl_desc sun5i_pinctrl_data = {
+ .pins = sun5i_pins,
+ .npins = ARRAY_SIZE(sun5i_pins),
.irq_banks = 1,
};
-static int sun5i_a10s_pinctrl_probe(struct platform_device *pdev)
+static int sun5i_pinctrl_probe(struct platform_device *pdev)
{
- return sunxi_pinctrl_init(pdev,
- &sun5i_a10s_pinctrl_data);
+ unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &sun5i_pinctrl_data,
+ variant);
}
-static const struct of_device_id sun5i_a10s_pinctrl_match[] = {
- { .compatible = "allwinner,sun5i-a10s-pinctrl", },
- {}
+static const struct of_device_id sun5i_pinctrl_match[] = {
+ {
+ .compatible = "allwinner,sun5i-a10s-pinctrl",
+ .data = (void *)PINCTRL_SUN5I_A10S
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-pinctrl",
+ .data = (void *)PINCTRL_SUN5I_A13
+ },
+ {
+ .compatible = "nextthing,gr8-pinctrl",
+ .data = (void *)PINCTRL_SUN5I_GR8
+ },
+ { },
};
+MODULE_DEVICE_TABLE(of, sun5i_pinctrl_match);
-static struct platform_driver sun5i_a10s_pinctrl_driver = {
- .probe = sun5i_a10s_pinctrl_probe,
+static struct platform_driver sun5i_pinctrl_driver = {
+ .probe = sun5i_pinctrl_probe,
.driver = {
- .name = "sun5i-a10s-pinctrl",
- .of_match_table = sun5i_a10s_pinctrl_match,
+ .name = "sun5i-pinctrl",
+ .of_match_table = sun5i_pinctrl_match,
},
};
-builtin_platform_driver(sun5i_a10s_pinctrl_driver);
+module_platform_driver(sun5i_pinctrl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("Allwinner sun5i pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
new file mode 100644
index 000000000000..c86d3c42a905
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
@@ -0,0 +1,321 @@
+/*
+ * Allwinner V3s SoCs pinctrl driver.
+ *
+ * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * Based on pinctrl-sun8i-h3.c, which is:
+ * Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
+ *
+ * Based on pinctrl-sun8i-a23.c, which is:
+ * Copyright (C) 2014 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_v3s_pins[] = {
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PB_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PB_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PB_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+		  SUNXI_FUNCTION(0x2, "uart2"),		/* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PB_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PB_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PB_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PB_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PB_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PB_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PB_EINT9 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* CLK */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* CMD */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* RST */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "lcd")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "lcd")), /* DE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "lcd")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "lcd")), /* VSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lcd")), /* D21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* FIELD */
+ SUNXI_FUNCTION(0x3, "csi_mipi")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SCK */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SDA */
+ SUNXI_FUNCTION(0x3, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "lcd"), /* D22 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "lcd"), /* D23 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* MS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* CK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PG_EINT5 */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_v3s_pinctrl_data = {
+ .pins = sun8i_v3s_pins,
+ .npins = ARRAY_SIZE(sun8i_v3s_pins),
+ .irq_banks = 2,
+ .irq_read_needs_mux = true
+};
+
+static int sun8i_v3s_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun8i_v3s_pinctrl_data);
+}
+
+static const struct of_device_id sun8i_v3s_pinctrl_match[] = {
+ { .compatible = "allwinner,sun8i-v3s-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun8i_v3s_pinctrl_driver = {
+ .probe = sun8i_v3s_pinctrl_probe,
+ .driver = {
+ .name = "sun8i-v3s-pinctrl",
+ .of_match_table = sun8i_v3s_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun8i_v3s_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0eb51e33cb1b..aaef7f42f1e4 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -540,7 +540,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
enum pin_config_param param;
unsigned long flags;
u32 offset, shift, mask, reg;
- u16 arg, val;
+ u32 arg, val;
int ret;
param = pinconf_to_config_param(configs[i]);
@@ -1041,21 +1041,35 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
int i;
- pctl->ngroups = pctl->desc->npins;
- /* Allocate groups */
+ /*
+ * Allocate groups
+ *
+ * We assume that the number of groups is the number of pins
+ * given in the data array.
+ *
+ * This will not always be true, since some pins might not be
+ * available in the current variant, but fortunately for us,
+ * this means that the number of pins is the maximum group
+ * number we will ever see.
+ */
pctl->groups = devm_kzalloc(&pdev->dev,
- pctl->ngroups * sizeof(*pctl->groups),
+ pctl->desc->npins * sizeof(*pctl->groups),
GFP_KERNEL);
if (!pctl->groups)
return -ENOMEM;
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
- struct sunxi_pinctrl_group *group = pctl->groups + i;
+ struct sunxi_pinctrl_group *group = pctl->groups + pctl->ngroups;
+
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
group->name = pin->pin.name;
group->pin = pin->pin.number;
+
+ /* And now we count the actual number of pins / groups */
+ pctl->ngroups++;
}
/*
@@ -1063,17 +1077,23 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
* we'll reallocate that later anyway
*/
pctl->functions = devm_kzalloc(&pdev->dev,
- pctl->desc->npins * sizeof(*pctl->functions),
- GFP_KERNEL);
+ pctl->ngroups * sizeof(*pctl->functions),
+ GFP_KERNEL);
if (!pctl->functions)
return -ENOMEM;
/* Count functions and their associated groups */
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
- struct sunxi_desc_function *func = pin->functions;
+ struct sunxi_desc_function *func;
+
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
+
+ for (func = pin->functions; func->name; func++) {
+ if (func->variant && !(pctl->variant & func->variant))
+ continue;
- while (func->name) {
/* Create interrupt mapping while we're at it */
if (!strcmp(func->name, "irq")) {
int irqnum = func->irqnum + func->irqbank * IRQ_PER_BANK;
@@ -1081,22 +1101,32 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
}
sunxi_pinctrl_add_function(pctl, func->name);
- func++;
}
}
+ /* And now allocate and fill the array for real */
pctl->functions = krealloc(pctl->functions,
- pctl->nfunctions * sizeof(*pctl->functions),
- GFP_KERNEL);
+ pctl->nfunctions * sizeof(*pctl->functions),
+ GFP_KERNEL);
+ if (!pctl->functions) {
+ kfree(pctl->functions);
+ return -ENOMEM;
+ }
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
- struct sunxi_desc_function *func = pin->functions;
+ struct sunxi_desc_function *func;
- while (func->name) {
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
+
+ for (func = pin->functions; func->name; func++) {
struct sunxi_pinctrl_function *func_item;
const char **func_grp;
+ if (func->variant && !(pctl->variant & func->variant))
+ continue;
+
func_item = sunxi_pinctrl_find_function_by_name(pctl,
func->name);
if (!func_item)
@@ -1116,7 +1146,6 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
func_grp++;
*func_grp = pin->pin.name;
- func++;
}
}
@@ -1208,15 +1237,16 @@ static int sunxi_pinctrl_setup_debounce(struct sunxi_pinctrl *pctl,
return 0;
}
-int sunxi_pinctrl_init(struct platform_device *pdev,
- const struct sunxi_pinctrl_desc *desc)
+int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
+ const struct sunxi_pinctrl_desc *desc,
+ unsigned long variant)
{
struct device_node *node = pdev->dev.of_node;
struct pinctrl_desc *pctrl_desc;
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
struct resource *res;
- int i, ret, last_pin;
+ int i, ret, last_pin, pin_idx;
struct clk *clk;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
@@ -1233,6 +1263,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
+ pctl->variant = variant;
pctl->irq_array = devm_kcalloc(&pdev->dev,
IRQ_PER_BANK * pctl->desc->irq_banks,
@@ -1253,8 +1284,14 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
if (!pins)
return -ENOMEM;
- for (i = 0; i < pctl->desc->npins; i++)
- pins[i] = pctl->desc->pins[i].pin;
+ for (i = 0, pin_idx = 0; i < pctl->desc->npins; i++) {
+ const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+
+ if (pin->variant && !(pctl->variant & pin->variant))
+ continue;
+
+ pins[pin_idx++] = pin->pin;
+ }
pctrl_desc = devm_kzalloc(&pdev->dev,
sizeof(*pctrl_desc),
@@ -1265,7 +1302,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctrl_desc->name = dev_name(&pdev->dev);
pctrl_desc->owner = THIS_MODULE;
pctrl_desc->pins = pins;
- pctrl_desc->npins = pctl->desc->npins;
+ pctrl_desc->npins = pctl->ngroups;
pctrl_desc->confops = &sunxi_pconf_ops;
pctrl_desc->pctlops = &sunxi_pctrl_ops;
pctrl_desc->pmxops = &sunxi_pmx_ops;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index f78a44a03189..56be35387ccf 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -82,7 +82,12 @@
#define SUN4I_FUNC_INPUT 0
#define SUN4I_FUNC_IRQ 6
+#define PINCTRL_SUN5I_A10S BIT(1)
+#define PINCTRL_SUN5I_A13 BIT(2)
+#define PINCTRL_SUN5I_GR8 BIT(3)
+
struct sunxi_desc_function {
+ unsigned long variant;
const char *name;
u8 muxval;
u8 irqbank;
@@ -91,6 +96,7 @@ struct sunxi_desc_function {
struct sunxi_desc_pin {
struct pinctrl_pin_desc pin;
+ unsigned long variant;
struct sunxi_desc_function *functions;
};
@@ -128,6 +134,7 @@ struct sunxi_pinctrl {
unsigned *irq_array;
spinlock_t lock;
struct pinctrl_dev *pctl_dev;
+ unsigned long variant;
};
#define SUNXI_PIN(_pin, ...) \
@@ -137,12 +144,27 @@ struct sunxi_pinctrl {
__VA_ARGS__, { } }, \
}
+#define SUNXI_PIN_VARIANT(_pin, _variant, ...) \
+ { \
+ .pin = _pin, \
+ .variant = _variant, \
+ .functions = (struct sunxi_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
#define SUNXI_FUNCTION(_val, _name) \
{ \
.name = _name, \
.muxval = _val, \
}
+#define SUNXI_FUNCTION_VARIANT(_val, _name, _variant) \
+ { \
+ .name = _name, \
+ .muxval = _val, \
+ .variant = _variant, \
+ }
+
#define SUNXI_FUNCTION_IRQ(_val, _irq) \
{ \
.name = "irq", \
@@ -290,7 +312,11 @@ static inline u32 sunxi_irq_status_offset(u16 irq)
return irq_num * IRQ_STATUS_IRQ_BITS;
}
-int sunxi_pinctrl_init(struct platform_device *pdev,
- const struct sunxi_pinctrl_desc *desc);
+int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
+ const struct sunxi_pinctrl_desc *desc,
+ unsigned long variant);
+
+#define sunxi_pinctrl_init(_dev, _desc) \
+ sunxi_pinctrl_init_with_variant(_dev, _desc, 0)
#endif /* __PINCTRL_SUNXI_H */
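For context on how the variant hooks introduced above are meant to be consumed, here is a rough sketch of a per-SoC pin table. It is illustrative only (the pins, mux values and functions are made up, not copied from a datasheet), but the macros, the PINCTRL_SUN5I_* flags and the probe helper are the ones added in this series.

/* Hypothetical excerpt from a sun5i pin table; the file #includes "pinctrl-sunxi.h" */
static const struct sunxi_desc_pin example_sun5i_pins[] = {
	/* Pin present on every variant, but "uart1" only muxable on A13/GR8 */
	SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
		  SUNXI_FUNCTION(0x0, "gpio_in"),
		  SUNXI_FUNCTION(0x1, "gpio_out"),
		  SUNXI_FUNCTION_VARIANT(0x4, "uart1",
					 PINCTRL_SUN5I_A13 |
					 PINCTRL_SUN5I_GR8)),
	/* Pin that only exists on the GR8 package */
	SUNXI_PIN_VARIANT(SUNXI_PINCTRL_PIN(D, 2),
			  PINCTRL_SUN5I_GR8,
			  SUNXI_FUNCTION(0x0, "gpio_in"),
			  SUNXI_FUNCTION(0x1, "gpio_out")),
};

At probe time the driver passes the variant picked out of the OF match data to sunxi_pinctrl_init_with_variant(), exactly as the reworked sun5i probe above does; sunxi_pinctrl_build_state() then skips any pin or function whose variant mask does not intersect the running variant.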
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
new file mode 100644
index 000000000000..815a88673d38
--- /dev/null
+++ b/drivers/pinctrl/ti/Kconfig
@@ -0,0 +1,10 @@
+config PINCTRL_TI_IODELAY
+ tristate "TI IODelay Module pinconf driver"
+ depends on OF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select REGMAP_MMIO
+ help
+ Say Y here to support Texas Instruments' IO delay pinconf driver.
+ The IO delay module is used for the DRA7 SoC family.
diff --git a/drivers/pinctrl/ti/Makefile b/drivers/pinctrl/ti/Makefile
new file mode 100644
index 000000000000..913744e8b8fa
--- /dev/null
+++ b/drivers/pinctrl/ti/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PINCTRL_TI_IODELAY) += pinctrl-ti-iodelay.o
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
new file mode 100644
index 000000000000..717e3404900c
--- /dev/null
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -0,0 +1,937 @@
+/*
+ * Support for configuration of IO Delay module found on Texas Instruments SoCs
+ * such as DRA7
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../devicetree.h"
+
+#define DRIVER_NAME "ti-iodelay"
+
+/**
+ * struct ti_iodelay_reg_data - Describes the registers for the iodelay instance
+ * @signature_mask: CONFIG_REG mask for the signature bits (see TRM)
+ * @signature_value: CONFIG_REG signature value to be written (see TRM)
+ * @lock_mask: CONFIG_REG mask for the lock bits (see TRM)
+ * @lock_val: CONFIG_REG lock value for the lock bits (see TRM)
+ * @unlock_val: CONFIG_REG unlock value for the lock bits (see TRM)
+ * @binary_data_coarse_mask: CONFIG_REG coarse mask (see TRM)
+ * @binary_data_fine_mask: CONFIG_REG fine mask (see TRM)
+ * @reg_refclk_offset: Refclk register offset
+ * @refclk_period_mask: Refclk mask
+ * @reg_coarse_offset: Coarse register configuration offset
+ * @coarse_delay_count_mask: Coarse delay count mask
+ * @coarse_ref_count_mask: Coarse ref count mask
+ * @reg_fine_offset: Fine register configuration offset
+ * @fine_delay_count_mask: Fine delay count mask
+ * @fine_ref_count_mask: Fine ref count mask
+ * @reg_global_lock_offset: Global iodelay module lock register offset
+ * @global_lock_mask: Lock mask
+ * @global_unlock_val: Unlock value
+ * @global_lock_val: Lock value
+ * @reg_start_offset: Offset to iodelay registers after the CONFIG_REG_0 to 8
+ * @reg_nr_per_pin: Number of iodelay registers for each pin
+ * @regmap_config: Regmap configuration for the IODelay region
+ */
+struct ti_iodelay_reg_data {
+ u32 signature_mask;
+ u32 signature_value;
+ u32 lock_mask;
+ u32 lock_val;
+ u32 unlock_val;
+ u32 binary_data_coarse_mask;
+ u32 binary_data_fine_mask;
+
+ u32 reg_refclk_offset;
+ u32 refclk_period_mask;
+
+ u32 reg_coarse_offset;
+ u32 coarse_delay_count_mask;
+ u32 coarse_ref_count_mask;
+
+ u32 reg_fine_offset;
+ u32 fine_delay_count_mask;
+ u32 fine_ref_count_mask;
+
+ u32 reg_global_lock_offset;
+ u32 global_lock_mask;
+ u32 global_unlock_val;
+ u32 global_lock_val;
+
+ u32 reg_start_offset;
+ u32 reg_nr_per_pin;
+
+ struct regmap_config *regmap_config;
+};
+
+/**
+ * struct ti_iodelay_reg_values - Computed io_reg configuration values (see TRM)
+ * @coarse_ref_count: Coarse reference count
+ * @coarse_delay_count: Coarse delay count
+ * @fine_ref_count: Fine reference count
+ * @fine_delay_count: Fine Delay count
+ * @ref_clk_period: Reference Clock period
+ * @cdpe: Coarse delay parameter
+ * @fdpe: Fine delay parameter
+ */
+struct ti_iodelay_reg_values {
+ u16 coarse_ref_count;
+ u16 coarse_delay_count;
+
+ u16 fine_ref_count;
+ u16 fine_delay_count;
+
+ u16 ref_clk_period;
+
+ u32 cdpe;
+ u32 fdpe;
+};
+
+/**
+ * struct ti_iodelay_cfg - Description of each configuration parameters
+ * @offset: Configuration register offset
+ * @a_delay: Agnostic Delay (in ps)
+ * @g_delay: Gnostic Delay (in ps)
+ */
+struct ti_iodelay_cfg {
+ u16 offset;
+ u16 a_delay;
+ u16 g_delay;
+};
+
+/**
+ * struct ti_iodelay_pingroup - Structure that describes one group
+ * @cfg: configuration array for the pin (from dt)
+ * @ncfg: number of configuration values allocated
+ * @config: pinconf "Config" - currently a dummy value
+ */
+struct ti_iodelay_pingroup {
+ struct ti_iodelay_cfg *cfg;
+ int ncfg;
+ unsigned long config;
+};
+
+/**
+ * struct ti_iodelay_device - Represents information for a iodelay instance
+ * @dev: Device pointer
+ * @phys_base: Physical address base of the iodelay device
+ * @reg_base: Virtual address base of the iodelay device
+ * @regmap: Regmap for this iodelay instance
+ * @pctl: Pinctrl device
+ * @desc: pinctrl descriptor for pctl
+ * @pa: pinctrl pin wise description
+ * @reg_data: Register definition data for the IODelay instance
+ * @reg_init_conf_values: Initial configuration values.
+ */
+struct ti_iodelay_device {
+ struct device *dev;
+ unsigned long phys_base;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pa;
+
+ const struct ti_iodelay_reg_data *reg_data;
+ struct ti_iodelay_reg_values reg_init_conf_values;
+};
+
+/**
+ * ti_iodelay_extract() - extract bits for a field
+ * @val: Register value
+ * @mask: Mask
+ *
+ * Return: extracted value which is appropriately shifted
+ */
+static inline u32 ti_iodelay_extract(u32 val, u32 mask)
+{
+ return (val & mask) >> __ffs(mask);
+}
+
+/**
+ * ti_iodelay_compute_dpe() - Compute equation for delay parameter
+ * @period: Period to use
+ * @ref: Reference Count
+ * @delay: Delay count
+ * @delay_m: Delay multiplier
+ *
+ * Return: Computed delay parameter
+ */
+static inline u32 ti_iodelay_compute_dpe(u16 period, u16 ref, u16 delay,
+ u16 delay_m)
+{
+ u64 m, d;
+
+ /* Handle overflow conditions */
+ m = 10 * (u64)period * (u64)ref;
+ d = 2 * (u64)delay * (u64)delay_m;
+
+ /* Truncate result back to 32 bits */
+ return div64_u64(m, d);
+}
+
+/**
+ * ti_iodelay_pinconf_set() - Configure the pin configuration
+ * @iod: iodelay device
+ * @cfg: Configuration
+ *
+ * Update the configuration register as per the TRM and lock it up once done.
+ * *IMPORTANT NOTE* The SoC TRM does recommend doing iodelay programming only
+ * while in isolation. But then isolation also implies that every pin
+ * on the SoC (including DDR) will be isolated out. The only benefit is
+ * a glitchless configuration; however, the intent of this driver is purely
+ * to support a "glitchy" configuration where applicable.
+ *
+ * Return: 0 in case of success, else appropriate error value
+ */
+static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,
+ struct ti_iodelay_cfg *cfg)
+{
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+ struct ti_iodelay_reg_values *ival = &iod->reg_init_conf_values;
+ struct device *dev = iod->dev;
+ u32 g_delay_coarse, g_delay_fine;
+ u32 a_delay_coarse, a_delay_fine;
+ u32 c_elements, f_elements;
+ u32 total_delay;
+ u32 reg_mask, reg_val, tmp_val;
+ int r;
+
+ /* NOTE: Truncation is expected in all division below */
+ g_delay_coarse = cfg->g_delay / 920;
+ g_delay_fine = ((cfg->g_delay % 920) * 10) / 60;
+
+ a_delay_coarse = cfg->a_delay / ival->cdpe;
+ a_delay_fine = ((cfg->a_delay % ival->cdpe) * 10) / ival->fdpe;
+
+ c_elements = g_delay_coarse + a_delay_coarse;
+ f_elements = (g_delay_fine + a_delay_fine) / 10;
+
+ if (f_elements > 22) {
+ total_delay = c_elements * ival->cdpe + f_elements * ival->fdpe;
+ c_elements = total_delay / ival->cdpe;
+ f_elements = (total_delay % ival->cdpe) / ival->fdpe;
+ }
+
+ reg_mask = reg->signature_mask;
+ reg_val = reg->signature_value << __ffs(reg->signature_mask);
+
+ reg_mask |= reg->binary_data_coarse_mask;
+ tmp_val = c_elements << __ffs(reg->binary_data_coarse_mask);
+ if (tmp_val & ~reg->binary_data_coarse_mask) {
+ dev_err(dev, "Masking overflow of coarse elements %08x\n",
+ tmp_val);
+ tmp_val &= reg->binary_data_coarse_mask;
+ }
+ reg_val |= tmp_val;
+
+ reg_mask |= reg->binary_data_fine_mask;
+ tmp_val = f_elements << __ffs(reg->binary_data_fine_mask);
+ if (tmp_val & ~reg->binary_data_fine_mask) {
+ dev_err(dev, "Masking overflow of fine elements %08x\n",
+ tmp_val);
+ tmp_val &= reg->binary_data_fine_mask;
+ }
+ reg_val |= tmp_val;
+
+ /*
+ * NOTE: we leave the iodelay values unlocked - this is to work around
+ * situations such as those found with mmc mode change.
+ * However, this leaves the iodelay configuration exposed to unwarranted
+ * changes to the padconf register. Use with care!
+ */
+ reg_mask |= reg->lock_mask;
+ reg_val |= reg->unlock_val << __ffs(reg->lock_mask);
+ r = regmap_update_bits(iod->regmap, cfg->offset, reg_mask, reg_val);
+
+ dev_info(dev, "Set reg 0x%x Delay(a: %d g: %d), Elements(C=%d F=%d)0x%x\n",
+ cfg->offset, cfg->a_delay, cfg->g_delay, c_elements,
+ f_elements, reg_val);
+
+ return r;
+}
+
+/**
+ * ti_iodelay_pinconf_init_dev() - Initialize IODelay device
+ * @iod: iodelay device
+ *
+ * Unlocks the iodelay region, computes the common parameters
+ *
+ * Return: 0 in case of success, else appropriate error value
+ */
+static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
+{
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+ struct device *dev = iod->dev;
+ struct ti_iodelay_reg_values *ival = &iod->reg_init_conf_values;
+ u32 val;
+ int r;
+
+ /* unlock the iodelay region */
+ r = regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+ reg->global_lock_mask, reg->global_unlock_val);
+ if (r)
+ return r;
+
+ /* Read back the recalibration sequence done by the bootloader */
+ r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val);
+ if (r)
+ return r;
+ ival->ref_clk_period = ti_iodelay_extract(val, reg->refclk_period_mask);
+ dev_dbg(dev, "refclk_period=0x%04x\n", ival->ref_clk_period);
+
+ r = regmap_read(iod->regmap, reg->reg_coarse_offset, &val);
+ if (r)
+ return r;
+ ival->coarse_ref_count =
+ ti_iodelay_extract(val, reg->coarse_ref_count_mask);
+ ival->coarse_delay_count =
+ ti_iodelay_extract(val, reg->coarse_delay_count_mask);
+ if (!ival->coarse_delay_count) {
+ dev_err(dev, "Invalid Coarse delay count (0) (reg=0x%08x)\n",
+ val);
+ return -EINVAL;
+ }
+ ival->cdpe = ti_iodelay_compute_dpe(ival->ref_clk_period,
+ ival->coarse_ref_count,
+ ival->coarse_delay_count, 88);
+ if (!ival->cdpe) {
+ dev_err(dev, "Invalid cdpe computed params = %d %d %d\n",
+ ival->ref_clk_period, ival->coarse_ref_count,
+ ival->coarse_delay_count);
+ return -EINVAL;
+ }
+ dev_dbg(iod->dev, "coarse: ref=0x%04x delay=0x%04x cdpe=0x%08x\n",
+ ival->coarse_ref_count, ival->coarse_delay_count, ival->cdpe);
+
+ r = regmap_read(iod->regmap, reg->reg_fine_offset, &val);
+ if (r)
+ return r;
+ ival->fine_ref_count =
+ ti_iodelay_extract(val, reg->fine_ref_count_mask);
+ ival->fine_delay_count =
+ ti_iodelay_extract(val, reg->fine_delay_count_mask);
+ if (!ival->fine_delay_count) {
+ dev_err(dev, "Invalid Fine delay count (0) (reg=0x%08x)\n",
+ val);
+ return -EINVAL;
+ }
+ ival->fdpe = ti_iodelay_compute_dpe(ival->ref_clk_period,
+ ival->fine_ref_count,
+ ival->fine_delay_count, 264);
+ if (!ival->fdpe) {
+ dev_err(dev, "Invalid fdpe(0) computed params = %d %d %d\n",
+ ival->ref_clk_period, ival->fine_ref_count,
+ ival->fine_delay_count);
+ return -EINVAL;
+ }
+ dev_dbg(iod->dev, "fine: ref=0x%04x delay=0x%04x fdpe=0x%08x\n",
+ ival->fine_ref_count, ival->fine_delay_count, ival->fdpe);
+
+ return 0;
+}
+
+/**
+ * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
+ * @iod: IODelay device
+ *
+ * Deinitialize the IODelay device (basically just lock the region back up).
+ */
+static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod)
+{
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+
+ /* lock the iodelay region back again */
+ regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+ reg->global_lock_mask, reg->global_lock_val);
+}
+
+/**
+ * ti_iodelay_get_pingroup() - Find the group mapped by a group selector
+ * @iod: iodelay device
+ * @selector: Group Selector
+ *
+ * Return: Corresponding group representing group selector
+ */
+static struct ti_iodelay_pingroup *
+ti_iodelay_get_pingroup(struct ti_iodelay_device *iod, unsigned int selector)
+{
+ struct group_desc *g;
+
+ g = pinctrl_generic_get_group(iod->pctl, selector);
+ if (!g) {
+ dev_err(iod->dev, "%s could not find pingroup %i\n", __func__,
+ selector);
+
+ return NULL;
+ }
+
+ return g->data;
+}
+
+/**
+ * ti_iodelay_offset_to_pin() - get a pin index based on the register offset
+ * @iod: iodelay driver instance
+ * @offset: register offset from the base
+ */
+static int ti_iodelay_offset_to_pin(struct ti_iodelay_device *iod,
+ unsigned int offset)
+{
+ const struct ti_iodelay_reg_data *r = iod->reg_data;
+ unsigned int index;
+
+ if (offset > r->regmap_config->max_register) {
+ dev_err(iod->dev, "mux offset out of range: 0x%x (0x%x)\n",
+ offset, r->regmap_config->max_register);
+ return -EINVAL;
+ }
+
+ index = (offset - r->reg_start_offset) / r->regmap_config->reg_stride;
+ index /= r->reg_nr_per_pin;
+
+ return index;
+}
+
+/**
+ * ti_iodelay_node_iterator() - Iterate iodelay node
+ * @pctldev: Pin controller driver
+ * @np: Device node
+ * @pinctrl_spec: Parsed arguments from device tree
+ * @pins: Array of pins in the pin group
+ * @pin_index: Pin index in the pin array
+ * @data: Pin controller driver specific data
+ *
+ */
+static int ti_iodelay_node_iterator(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ const struct of_phandle_args *pinctrl_spec,
+ int *pins, int pin_index, void *data)
+{
+ struct ti_iodelay_device *iod;
+ struct ti_iodelay_cfg *cfg = data;
+ const struct ti_iodelay_reg_data *r;
+ struct pinctrl_pin_desc *pd;
+ int pin;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ if (!iod)
+ return -EINVAL;
+
+ r = iod->reg_data;
+
+ if (pinctrl_spec->args_count < r->reg_nr_per_pin) {
+ dev_err(iod->dev, "invalid args_count for spec: %i\n",
+ pinctrl_spec->args_count);
+
+ return -EINVAL;
+ }
+
+ /* Index plus two value cells */
+ cfg[pin_index].offset = pinctrl_spec->args[0];
+ cfg[pin_index].a_delay = pinctrl_spec->args[1] & 0xffff;
+ cfg[pin_index].g_delay = pinctrl_spec->args[2] & 0xffff;
+
+ pin = ti_iodelay_offset_to_pin(iod, cfg[pin_index].offset);
+ if (pin < 0) {
+ dev_err(iod->dev, "could not add functions for %s 0x%x\n",
+ np->name, cfg[pin_index].offset);
+ return -ENODEV;
+ }
+ pins[pin_index] = pin;
+
+ pd = &iod->pa[pin];
+ pd->drv_data = &cfg[pin_index];
+
+ dev_dbg(iod->dev, "%s offset=%x a_delay = %d g_delay = %d\n",
+ np->name, cfg[pin_index].offset, cfg[pin_index].a_delay,
+ cfg[pin_index].g_delay);
+
+ return 0;
+}
+
+/**
+ * ti_iodelay_dt_node_to_map() - Map a device tree node to appropriate group
+ * @pctldev: pinctrl device representing IODelay device
+ * @np: Node Pointer (device tree)
+ * @map: Pinctrl Map returned back to pinctrl framework
+ * @num_maps: Number of maps (1)
+ *
+ * Maps the device tree description into a group of configuration parameters
+ * for iodelay block entry.
+ *
+ * Return: 0 in case of success, else appropriate error value
+ */
+static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct ti_iodelay_device *iod;
+ struct ti_iodelay_cfg *cfg;
+ struct ti_iodelay_pingroup *g;
+ const char *name = "pinctrl-pin-array";
+ int rows, *pins, error = -EINVAL, i;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ if (!iod)
+ return -EINVAL;
+
+ rows = pinctrl_count_index_with_args(np, name);
+ if (rows == -EINVAL)
+ return rows;
+
+ *map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
+ if (!*map)
+ return -ENOMEM;
+ *num_maps = 0;
+
+ g = devm_kzalloc(iod->dev, sizeof(*g), GFP_KERNEL);
+ if (!g) {
+ error = -ENOMEM;
+ goto free_map;
+ }
+
+ pins = devm_kzalloc(iod->dev, sizeof(*pins) * rows, GFP_KERNEL);
+ if (!pins)
+ goto free_group;
+
+ cfg = devm_kzalloc(iod->dev, sizeof(*cfg) * rows, GFP_KERNEL);
+ if (!cfg) {
+ error = -ENOMEM;
+ goto free_pins;
+ }
+
+ for (i = 0; i < rows; i++) {
+ struct of_phandle_args pinctrl_spec;
+
+ error = pinctrl_parse_index_with_args(np, name, i,
+ &pinctrl_spec);
+ if (error)
+ goto free_data;
+
+ error = ti_iodelay_node_iterator(pctldev, np, &pinctrl_spec,
+ pins, i, cfg);
+ if (error)
+ goto free_data;
+ }
+
+ g->cfg = cfg;
+ g->ncfg = i;
+ g->config = PIN_CONFIG_END;
+
+ error = pinctrl_generic_add_group(iod->pctl, np->name, pins, i, g);
+ if (error < 0)
+ goto free_data;
+
+ (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ (*map)->data.configs.group_or_pin = np->name;
+ (*map)->data.configs.configs = &g->config;
+ (*map)->data.configs.num_configs = 1;
+ *num_maps = 1;
+
+ return 0;
+
+free_data:
+ devm_kfree(iod->dev, cfg);
+free_pins:
+ devm_kfree(iod->dev, pins);
+free_group:
+ devm_kfree(iod->dev, g);
+free_map:
+ devm_kfree(iod->dev, *map);
+
+ return error;
+}
+
+/**
+ * ti_iodelay_pinconf_group_get() - Get the group configuration
+ * @pctldev: pinctrl device representing IODelay device
+ * @selector: Group selector
+ * @config: Configuration returned
+ *
+ * Return: The configuration if the group is valid, else returns -EINVAL
+ */
+static int ti_iodelay_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *config)
+{
+ struct ti_iodelay_device *iod;
+ struct device *dev;
+ struct ti_iodelay_pingroup *group;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ dev = iod->dev;
+ group = ti_iodelay_get_pingroup(iod, selector);
+
+ if (!group)
+ return -EINVAL;
+
+ *config = group->config;
+ return 0;
+}
+
+/**
+ * ti_iodelay_pinconf_group_set() - Configure the groups of pins
+ * @pctldev: pinctrl device representing IODelay device
+ * @selector: Group selector
+ * @configs: Configurations
+ * @num_configs: Number of configurations
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct ti_iodelay_device *iod;
+ struct device *dev;
+ struct ti_iodelay_pingroup *group;
+ int i;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ dev = iod->dev;
+ group = ti_iodelay_get_pingroup(iod, selector);
+
+ if (num_configs != 1) {
+ dev_err(dev, "Unsupported number of configurations %d\n",
+ num_configs);
+ return -EINVAL;
+ }
+
+ if (*configs != PIN_CONFIG_END) {
+ dev_err(dev, "Unsupported configuration\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < group->ncfg; i++) {
+ if (ti_iodelay_pinconf_set(iod, &group->cfg[i]))
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * ti_iodelay_pin_to_offset() - get pin register offset based on the pin index
+ * @iod: iodelay driver instance
+ * @selector: Pin index
+ */
+static unsigned int ti_iodelay_pin_to_offset(struct ti_iodelay_device *iod,
+ unsigned int selector)
+{
+ const struct ti_iodelay_reg_data *r = iod->reg_data;
+ unsigned int offset;
+
+ offset = selector * r->regmap_config->reg_stride;
+ offset *= r->reg_nr_per_pin;
+ offset += r->reg_start_offset;
+
+ return offset;
+}
+
+static void ti_iodelay_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct ti_iodelay_device *iod;
+ struct pinctrl_pin_desc *pd;
+ struct ti_iodelay_cfg *cfg;
+ const struct ti_iodelay_reg_data *r;
+ unsigned long offset;
+ u32 in, oen, out;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ r = iod->reg_data;
+
+ offset = ti_iodelay_pin_to_offset(iod, pin);
+ pd = &iod->pa[pin];
+ cfg = pd->drv_data;
+
+ regmap_read(iod->regmap, offset, &in);
+ regmap_read(iod->regmap, offset + r->regmap_config->reg_stride, &oen);
+ regmap_read(iod->regmap, offset + r->regmap_config->reg_stride * 2,
+ &out);
+
+ seq_printf(s, "%lx a: %i g: %i (%08x %08x %08x) %s ",
+ iod->phys_base + offset,
+ cfg ? cfg->a_delay : -1,
+ cfg ? cfg->g_delay : -1,
+ in, oen, out, DRIVER_NAME);
+}
+
+/**
+ * ti_iodelay_pinconf_group_dbg_show() - show the group information
+ * @pctldev: pinctrl device representing IODelay device
+ * @s: Sequence file
+ * @selector: Group selector
+ *
+ * Provide the configuration information of the selected group
+ */
+static void ti_iodelay_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int selector)
+{
+ struct ti_iodelay_device *iod;
+ struct device *dev;
+ struct ti_iodelay_pingroup *group;
+ int i;
+
+ iod = pinctrl_dev_get_drvdata(pctldev);
+ dev = iod->dev;
+ group = ti_iodelay_get_pingroup(iod, selector);
+ if (!group)
+ return;
+
+ for (i = 0; i < group->ncfg; i++) {
+ struct ti_iodelay_cfg *cfg;
+ u32 reg = 0;
+
+ cfg = &group->cfg[i];
+ regmap_read(iod->regmap, cfg->offset, &reg);
+ seq_printf(s, "\n\t0x%08x = 0x%08x (%3d, %3d)",
+ cfg->offset, reg, cfg->a_delay,
+ cfg->g_delay);
+ }
+}
+#endif
+
+static struct pinctrl_ops ti_iodelay_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+ .pin_dbg_show = ti_iodelay_pin_dbg_show,
+#endif
+ .dt_node_to_map = ti_iodelay_dt_node_to_map,
+};
+
+static struct pinconf_ops ti_iodelay_pinctrl_pinconf_ops = {
+ .pin_config_group_get = ti_iodelay_pinconf_group_get,
+ .pin_config_group_set = ti_iodelay_pinconf_group_set,
+#ifdef CONFIG_DEBUG_FS
+ .pin_config_group_dbg_show = ti_iodelay_pinconf_group_dbg_show,
+#endif
+};
+
+/**
+ * ti_iodelay_alloc_pins() - Allocate structures needed for pins for iodelay
+ * @dev: Device pointer
+ * @iod: iodelay device
+ * @base_phy: Base Physical Address
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_alloc_pins(struct device *dev,
+ struct ti_iodelay_device *iod, u32 base_phy)
+{
+ const struct ti_iodelay_reg_data *r = iod->reg_data;
+ struct pinctrl_pin_desc *pin;
+ u32 phy_reg;
+ int nr_pins, i;
+
+ nr_pins = ti_iodelay_offset_to_pin(iod, r->regmap_config->max_register);
+ dev_dbg(dev, "Allocating %i pins\n", nr_pins);
+
+ iod->pa = devm_kzalloc(dev, sizeof(*iod->pa) * nr_pins, GFP_KERNEL);
+ if (!iod->pa)
+ return -ENOMEM;
+
+ iod->desc.pins = iod->pa;
+ iod->desc.npins = nr_pins;
+
+ phy_reg = r->reg_start_offset + base_phy;
+
+ for (i = 0; i < nr_pins; i++, phy_reg += 4) {
+ pin = &iod->pa[i];
+ pin->number = i;
+ }
+
+ return 0;
+}
+
+static struct regmap_config dra7_iodelay_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd1c,
+};
+
+static struct ti_iodelay_reg_data dra7_iodelay_data = {
+ .signature_mask = 0x0003f000,
+ .signature_value = 0x29,
+ .lock_mask = 0x00000400,
+ .lock_val = 1,
+ .unlock_val = 0,
+ .binary_data_coarse_mask = 0x000003e0,
+ .binary_data_fine_mask = 0x0000001f,
+
+ .reg_refclk_offset = 0x14,
+ .refclk_period_mask = 0xffff,
+
+ .reg_coarse_offset = 0x18,
+ .coarse_delay_count_mask = 0xffff0000,
+ .coarse_ref_count_mask = 0x0000ffff,
+
+ .reg_fine_offset = 0x1C,
+ .fine_delay_count_mask = 0xffff0000,
+ .fine_ref_count_mask = 0x0000ffff,
+
+ .reg_global_lock_offset = 0x2c,
+ .global_lock_mask = 0x0000ffff,
+ .global_unlock_val = 0x0000aaaa,
+ .global_lock_val = 0x0000aaab,
+
+ .reg_start_offset = 0x30,
+ .reg_nr_per_pin = 3,
+ .regmap_config = &dra7_iodelay_regmap_config,
+};
+
+static const struct of_device_id ti_iodelay_of_match[] = {
+ {.compatible = "ti,dra7-iodelay", .data = &dra7_iodelay_data},
+ { /* Hopefully no more.. */ },
+};
+MODULE_DEVICE_TABLE(of, ti_iodelay_of_match);
+
+/**
+ * ti_iodelay_probe() - Standard probe
+ * @pdev: platform device
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = of_node_get(dev->of_node);
+ const struct of_device_id *match;
+ struct resource *res;
+ struct ti_iodelay_device *iod;
+ int ret = 0;
+
+ if (!np) {
+ ret = -EINVAL;
+ dev_err(dev, "No OF node\n");
+ goto exit_out;
+ }
+
+ match = of_match_device(ti_iodelay_of_match, dev);
+ if (!match) {
+ ret = -EINVAL;
+ dev_err(dev, "No DATA match\n");
+ goto exit_out;
+ }
+
+ iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL);
+ if (!iod) {
+ ret = -ENOMEM;
+ goto exit_out;
+ }
+ iod->dev = dev;
+ iod->reg_data = match->data;
+
+ /* So far we can assume there is only one bank of registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Missing MEM resource\n");
+ ret = -ENODEV;
+ goto exit_out;
+ }
+
+ iod->phys_base = res->start;
+ iod->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iod->reg_base)) {
+ ret = PTR_ERR(iod->reg_base);
+ goto exit_out;
+ }
+
+ iod->regmap = devm_regmap_init_mmio(dev, iod->reg_base,
+ iod->reg_data->regmap_config);
+ if (IS_ERR(iod->regmap)) {
+ dev_err(dev, "Regmap MMIO init failed.\n");
+ ret = PTR_ERR(iod->regmap);
+ goto exit_out;
+ }
+
+ ret = ti_iodelay_pinconf_init_dev(iod);
+ if (ret)
+ goto exit_out;
+
+ ret = ti_iodelay_alloc_pins(dev, iod, res->start);
+ if (ret)
+ goto exit_out;
+
+ iod->desc.pctlops = &ti_iodelay_pinctrl_ops;
+ /* no pinmux ops - we are pinconf */
+ iod->desc.confops = &ti_iodelay_pinctrl_pinconf_ops;
+ iod->desc.name = dev_name(dev);
+ iod->desc.owner = THIS_MODULE;
+
+ ret = pinctrl_register_and_init(&iod->desc, dev, iod, &iod->pctl);
+ if (ret) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ goto exit_out;
+ }
+
+ platform_set_drvdata(pdev, iod);
+
+exit_out:
+ of_node_put(np);
+ return ret;
+}
+
+/**
+ * ti_iodelay_remove() - standard remove
+ * @pdev: platform device
+ *
+ * Return: 0 if all went fine, else appropriate error value.
+ */
+static int ti_iodelay_remove(struct platform_device *pdev)
+{
+ struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
+
+ if (!iod)
+ return 0;
+
+ if (iod->pctl)
+ pinctrl_unregister(iod->pctl);
+
+ ti_iodelay_pinconf_deinit_dev(iod);
+
+ /* Expect other allocations to be freed by devm */
+
+ return 0;
+}
+
+static struct platform_driver ti_iodelay_driver = {
+ .probe = ti_iodelay_probe,
+ .remove = ti_iodelay_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = ti_iodelay_of_match,
+ },
+};
+module_platform_driver(ti_iodelay_driver);
+
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("Pinconf driver for TI's IO Delay module");
+MODULE_LICENSE("GPL v2");
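To make the delay math in ti_iodelay_compute_dpe() and ti_iodelay_pinconf_set() above easier to follow, here is a small standalone C sketch of the same arithmetic. The register read-back values and the 600 ps request are invented for illustration only and are not taken from a DRA7 TRM.

#include <stdio.h>
#include <stdint.h>

/* Same equation as ti_iodelay_compute_dpe():
 * dpe = (10 * period * ref) / (2 * delay * delay_m) */
static uint64_t compute_dpe(uint64_t period, uint64_t ref, uint64_t delay,
			    uint64_t delay_m)
{
	return (10 * period * ref) / (2 * delay * delay_m);
}

int main(void)
{
	/* Invented read-back values for the refclk/coarse/fine registers */
	uint64_t period = 2901, coarse_ref = 79, coarse_delay = 375;
	uint64_t fine_ref = 79, fine_delay = 434;

	uint64_t cdpe = compute_dpe(period, coarse_ref, coarse_delay, 88);
	uint64_t fdpe = compute_dpe(period, fine_ref, fine_delay, 264);

	/* Split a 600 ps "agnostic" delay into coarse/fine parts,
	 * mirroring the first step of ti_iodelay_pinconf_set() */
	uint64_t a_delay = 600;
	uint64_t a_coarse = a_delay / cdpe;
	uint64_t a_fine = ((a_delay % cdpe) * 10) / fdpe;

	printf("cdpe=%llu fdpe=%llu -> coarse=%llu fine=%llu\n",
	       (unsigned long long)cdpe, (unsigned long long)fdpe,
	       (unsigned long long)a_coarse, (unsigned long long)a_fine);
	return 0;
}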
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 9b2ee717bccc..546f23c9040c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -297,7 +297,7 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
const struct pin_desc *desc,
- enum pin_config_param param, u16 arg)
+ enum pin_config_param param, u32 arg)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
enum uniphier_pin_pull_dir pull_dir =
@@ -468,7 +468,7 @@ static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
enum pin_config_param param =
pinconf_to_config_param(configs[i]);
- u16 arg = pinconf_to_config_argument(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index aa8bd9794683..96686336e3a3 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 270ca2a47a8c..c207e60b734f 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -428,7 +428,7 @@ static int wmt_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
{
struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u16 arg;
+ u32 arg;
u32 bank = WMT_BANK_FROM_PIN(pin);
u32 bit = WMT_BIT_FROM_PIN(pin);
u32 reg_pull_en = data->banks[bank].reg_pull_en;
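Several hunks in this series (pinctrl-sunxi, uniphier, vt8500/wmt, and rtc-omap further down) widen the local pinconf argument from u16 to u32. The reason is that generic pinconf packs the parameter and its argument into a single unsigned long, and the argument field is wider than 16 bits, so holding it in a u16 silently truncates large values such as microsecond debounce times. A minimal sketch of the round trip using the pinconf-generic helpers; the wrapper function name and the 100000 us value are made up for illustration.

#include <linux/types.h>
#include <linux/pinctrl/pinconf-generic.h>

static void pinconf_arg_width_demo(void)
{
	/* Pack a 100000 us debounce request the way the core does */
	unsigned long config =
		pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 100000);

	/* Unpacking into u32 keeps the value; a u16 silently truncates it */
	u32 arg = pinconf_to_config_argument(config);	/* 100000 */
	u16 bad = pinconf_to_config_argument(config);	/* 34464  */

	(void)arg;
	(void)bad;
}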
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5fe8be089b8b..59aa8e302bc3 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1034,7 +1034,7 @@ config SURFACE_PRO3_BUTTON
config SURFACE_3_BUTTON
tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
- depends on ACPI && KEYBOARD_GPIO
+ depends on ACPI && KEYBOARD_GPIO && I2C
---help---
This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 61f39abf5dc8..82d67715ce76 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -177,43 +177,43 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
-static void logolamp_set(struct led_classdev *cdev,
+static int logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev logolamp_led = {
.name = "fujitsu::logolamp",
.brightness_get = logolamp_get,
- .brightness_set = logolamp_set
+ .brightness_set_blocking = logolamp_set
};
static enum led_brightness kblamps_get(struct led_classdev *cdev);
-static void kblamps_set(struct led_classdev *cdev,
+static int kblamps_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev kblamps_led = {
.name = "fujitsu::kblamps",
.brightness_get = kblamps_get,
- .brightness_set = kblamps_set
+ .brightness_set_blocking = kblamps_set
};
static enum led_brightness radio_led_get(struct led_classdev *cdev);
-static void radio_led_set(struct led_classdev *cdev,
+static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev radio_led = {
.name = "fujitsu::radio_led",
.brightness_get = radio_led_get,
- .brightness_set = radio_led_set
+ .brightness_set_blocking = radio_led_set
};
static enum led_brightness eco_led_get(struct led_classdev *cdev);
-static void eco_led_set(struct led_classdev *cdev,
+static int eco_led_set(struct led_classdev *cdev,
enum led_brightness brightness);
static struct led_classdev eco_led = {
.name = "fujitsu::eco_led",
.brightness_get = eco_led_get,
- .brightness_set = eco_led_set
+ .brightness_set_blocking = eco_led_set
};
#endif
@@ -267,48 +267,48 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* LED class callbacks */
-static void logolamp_set(struct led_classdev *cdev,
+static int logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL) {
call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
- call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
} else if (brightness >= LED_HALF) {
call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
- call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
} else {
- call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
}
}
-static void kblamps_set(struct led_classdev *cdev,
+static int kblamps_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
- call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
else
- call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
+ return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
}
-static void radio_led_set(struct led_classdev *cdev,
+static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
- call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
else
- call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+ return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
}
-static void eco_led_set(struct led_classdev *cdev,
+static int eco_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
int curr;
curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
if (brightness >= LED_FULL)
- call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
else
- call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
+ return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
}
static enum led_brightness logolamp_get(struct led_classdev *cdev)
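The fujitsu-laptop hunks above follow the standard conversion from the void brightness_set() callback to brightness_set_blocking(), which is allowed to sleep (these setters end up in firmware calls) and to propagate an error code back to the LED core. A minimal sketch of the pattern; example_led_hw_write() is a hypothetical stand-in for whatever firmware or bus access a driver performs.

#include <linux/leds.h>

static int example_led_hw_write(bool on)
{
	/* Hypothetical helper: would talk to ACPI/EC/I2C and may fail */
	return 0;
}

static int example_led_set(struct led_classdev *cdev,
			   enum led_brightness brightness)
{
	/* brightness_set_blocking may sleep and returns an error code */
	return example_led_hw_write(brightness != LED_OFF);
}

static struct led_classdev example_led = {
	.name			 = "example::status",
	.max_brightness		 = 1,
	.brightness_set_blocking = example_led_set,
};

The classdev is then registered with led_classdev_register() as usual; the core takes care of deferring sleeping setters to a workqueue when callers cannot block.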
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 410741acb3c9..f46ece2ce3c4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 8:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 1fc0de870ff8..361770568ad0 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
DRIVER_NAME, input);
if (error) {
dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
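For reference, the IRQF_ONESHOT addition in the intel_mid_powerbtn hunk above is needed because the driver registers a threaded handler with no primary handler: the IRQ core refuses such a request without IRQF_ONESHOT (the line must stay masked until the thread has run, otherwise a level-triggered interrupt would re-fire forever) and fails with -EINVAL. A hedged sketch of the shape, with made-up names:

#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* Runs in process context, may sleep */
	return IRQ_HANDLED;
}

static int example_setup_irq(int irq, void *dev_id)
{
	/* NULL primary handler => IRQF_ONESHOT is mandatory */
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_ONESHOT, "example", dev_id);
}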
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 97b4c3a219c0..25f15df5c2d7 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
return 0;
fail_platform_mux_register:
- for (i--; i > 0 ; i--)
+ while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
fail_alloc:
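The mlx-platform change above fixes a classic unwind off-by-one: `for (i--; i > 0; i--)` stops before index 0 and so never unregisters the first mux device, whereas `while (--i >= 0)` visits i-1 down to 0 inclusive. A small sketch of the idiom; the item type and register/unregister helpers are hypothetical.

struct example_item { int id; };
static int example_register(struct example_item *it) { return 0; }	/* hypothetical */
static void example_unregister(struct example_item *it) { }		/* hypothetical */

static int example_register_all(struct example_item *items, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = example_register(&items[i]);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* Undo items[i - 1] .. items[0]; testing "i > 0" here would skip items[0] */
	while (--i >= 0)
		example_unregister(&items[i]);
	return ret;
}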
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
index cbf4d83a7271..25b176996cb7 100644
--- a/drivers/platform/x86/surface3-wmi.c
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
static int s3_wmi_check_platform_device(struct device *dev, void *data)
{
- struct acpi_device *adev, *ts_adev;
+ struct acpi_device *adev, *ts_adev = NULL;
acpi_handle handle;
acpi_status status;
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
{
s3_wmi_send_lid_state();
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
static struct platform_driver s3_wmi_driver = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9a507e77eced..90b05c72186c 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
goto unwind_vring_allocations;
}
- /* track the rvdevs list reference */
- kref_get(&rvdev->refcount);
-
list_add_tail(&rvdev->node, &rproc->rvdevs);
rproc_add_subdev(rproc, &rvdev->subdev,
@@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/*
* Create a copy of the resource table. When a virtio device starts
* and calls vring_new_virtqueue() the address of the allocated vring
- * will be stored in the table_ptr. Before the device is started,
- * table_ptr will be copied into device memory.
+ * will be stored in the cached_table. Before the device is started,
+ * cached_table will be copied into device memory.
*/
- rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL);
- if (!rproc->table_ptr)
+ rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+ if (!rproc->cached_table)
goto clean_up;
+ rproc->table_ptr = rproc->cached_table;
+
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
}
/*
- * The starting device has been given the rproc->table_ptr as the
+ * The starting device has been given the rproc->cached_table as the
* resource table. The address of the vring along with the other
- * allocated resources (carveouts etc) is stored in table_ptr.
+ * allocated resources (carveouts etc) is stored in cached_table.
* In order to pass this information to the remote device we must copy
* this information to device memory. We also update the table_ptr so
* that any subsequent changes will be applied to the loaded version.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (loaded_table)
- memcpy(loaded_table, rproc->table_ptr, tablesz);
+ if (loaded_table) {
+ memcpy(loaded_table, rproc->cached_table, tablesz);
+ rproc->table_ptr = loaded_table;
+ }
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -951,7 +952,8 @@ stop_rproc:
clean_up_resources:
rproc_resource_cleanup(rproc);
clean_up:
- kfree(rproc->table_ptr);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
rproc->table_ptr = NULL;
rproc_disable_iommu(rproc);
@@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
- kfree(rproc->table_ptr);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
rproc->table_ptr = NULL;
/* if in crash state, unlock crash handler */
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index a79cb5a9e5f2..1cfb775e8e82 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
struct device *dev = &rpdev->dev;
int ret;
- dev_set_name(&rpdev->dev, "%s:%s",
- dev_name(dev->parent), rpdev->id.name);
+ dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+ rpdev->id.name, rpdev->src, rpdev->dst);
rpdev->dev.bus = &rpmsg_bus;
rpdev->dev.release = rpmsg_release_device;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 51e52446eacb..73594f38c453 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -610,7 +610,7 @@ static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
u32 val;
unsigned int param;
- u16 param_val;
+ u32 param_val;
int i;
rtc->type->unlock(rtc);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 639ed4e6afd1..070c4da95f48 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
* This may happen on device detach.
*/
if (ret && (ret != -ENODEV))
- dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+ dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
ret, index);
vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ u8 old_status = *vcdev->status;
+ struct ccw1 *ccw;
+
+ if (vcdev->revision < 1)
+ return *vcdev->status;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return old_status;
+
+ ccw->cmd_code = CCW_CMD_READ_STATUS;
+ ccw->flags = 0;
+ ccw->count = sizeof(*vcdev->status);
+ ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+/*
+ * If the channel program failed (should only happen if the device
+ * was hotunplugged, and then we clean up via the machine check
+ * handler anyway), vcdev->status was not overwritten and we just
+ * return the old status, which is fine.
+*/
+ kfree(ccw);
return *vcdev->status;
}
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
kfree(ccw);
}
-static struct virtio_config_ops virtio_ccw_config_ops = {
+static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
.get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
case VIRTIO_CCW_DOING_READ_CONFIG:
case VIRTIO_CCW_DOING_WRITE_CONFIG:
case VIRTIO_CCW_DOING_WRITE_STATUS:
+ case VIRTIO_CCW_DOING_READ_STATUS:
case VIRTIO_CCW_DOING_SET_VQ:
case VIRTIO_CCW_DOING_SET_IND:
case VIRTIO_CCW_DOING_SET_CONF_IND:
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index d9e15210b110..5caf5f3ff642 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a9a00169ad91..b2e8c0dfc79c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- struct fc_bsg_request *bsg_request = bsg_request;
+ struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index f9e862093a25..cfcfff48e8e1 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -58,7 +58,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.2.25.0"
+#define BFAD_DRIVER_VERSION "3.2.25.1"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 9ddc9200e0a4..9e4b7709043e 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -248,6 +248,7 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
atomic_t in_flight; /* io counter */
+ bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2544a37ece0a..adb3d5871e74 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long wait_host_tmo;
struct Scsi_Host *shost = sc->device->host;
struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->internal_reset_inprogress == 0) {
+ fnic->internal_reset_inprogress = 1;
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "host reset in progress skipping another host reset\n");
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* If fnic_reset is successful, wait for fabric login to complete
@@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
}
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->internal_reset_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 3d3768aaab4f..99b747cedbeb 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -46,6 +46,7 @@
#define INITIAL_SRP_LIMIT 800
#define DEFAULT_MAX_SECTORS 256
+#define MAX_TXU 1024 * 1024
static uint max_vdma_size = MAX_H_COPY_RDMA;
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
}
info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!info) {
dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
info->mad_version = cpu_to_be32(MAD_VERSION_1);
info->os_type = cpu_to_be32(LINUX);
memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
- info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+ info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
dma_wmb();
rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
}
cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!cap) {
dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
iue->target);
@@ -3585,7 +3586,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
1, 1);
if (rc) {
pr_err("srp_transfer_data() failed: %d\n", rc);
- return -EAGAIN;
+ return -EIO;
}
/*
* We now tell TCM to add this WRITE CDB directly into the TCM storage
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 236e4e51d161..7b6bd8ed0d0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4faa7672fc1d..a78a3df68f67 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 394fe1338d09..dcb33f4fa687 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
* @eedp_enable: eedp support enable bit
* @eedp_type: 0(type_1), 1(type_2), 2(type_3)
* @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
*/
struct MPT3SAS_DEVICE {
struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
u8 ignore_delay_remove;
/* Iopriority Command Handling */
u8 ncq_prio_enable;
+ /*
+ * Bug workaround for SATL handling: the mpt2/3sas firmware
+ * doesn't return BUSY or TASK_SET_FULL for subsequent
+ * commands while a SATL pass through is in operation as the
+ * spec requires, it simply does nothing with them until the
+ * pass through completes, causing them possibly to timeout if
+ * the passthrough is a long executing command (like format or
+ * secure erase). This variable allows us to do the right
+ * thing while a SATL command is pending.
+ */
+ unsigned long ata_command_pending;
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b5c966e319d3..75f3fce1c867 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
}
}
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+ return 0;
+
+ if (pending)
+ return test_and_set_bit(0, &priv->ata_command_pending);
+
+ clear_bit(0, &priv->ata_command_pending);
+ return 0;
}
/**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
if (!scmd)
continue;
count++;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device,
- SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
- /*
- * Lock the device for any subsequent command until command is
- * done.
- */
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_block(scmd->device);
-
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
if (scmd == NULL)
return 1;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 23ca8a274586..21331453db7b 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -1,6 +1,6 @@
config QEDI
tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
- depends on PCI && SCSI
+ depends on PCI && SCSI && UIO
depends on QED
select SCSI_ISCSI_ATTRS
select QED_LL2
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 47eb4d545d13..f201f4099620 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
ssize_t rval = 0;
+ mutex_lock(&ha->optrom_mutex);
+
if (ha->optrom_state != QLA_SREADING)
- return 0;
+ goto out;
- mutex_lock(&ha->optrom_mutex);
rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
ha->optrom_region_size);
+
+out:
mutex_unlock(&ha->optrom_mutex);
return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
- if (ha->optrom_state != QLA_SWRITING)
+ mutex_lock(&ha->optrom_mutex);
+
+ if (ha->optrom_state != QLA_SWRITING) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
- if (off > ha->optrom_region_size)
+ }
+ if (off > ha->optrom_region_size) {
+ mutex_unlock(&ha->optrom_mutex);
return -ERANGE;
+ }
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;
- mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
mutex_unlock(&ha->optrom_mutex);
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
int type;
- int rval = 0;
port_id_t did;
type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
- rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+ qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
return count;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f7df01b76714..5b1287a63c49 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1556,7 +1556,8 @@ typedef struct {
struct atio {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
};
@@ -2732,7 +2733,7 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
+#define QLA_BASE_VECTORS 2 /* default + RSP */
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
uint16_t entry;
char name[30];
void *handle;
- struct irq_affinity_notify irq_notify;
int cpuid;
};
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 632d5f30386a..7b6317c8c2e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
/* Wait for soft-reset to complete. */
RD_REG_DWORD(&reg->ctrl_status);
- for (cnt = 0; cnt < 6000000; cnt++) {
+ for (cnt = 0; cnt < 60; cnt++) {
barrier();
if ((RD_REG_DWORD(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->hccr);
RD_REG_WORD(&reg->mailbox0);
- for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5093ca9b02ec..dc88a09f9043 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
-static void qla_irq_affinity_notify(struct irq_affinity_notify *,
- const cpumask_t *);
-static void qla_irq_affinity_release(struct kref *);
-
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
- if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
- /* if kernel does not notify qla of IRQ's CPU change,
- * then set it here.
- */
- rsp->msix->cpuid = smp_processor_id();
- ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
- }
-
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
-#define MIN_MSIX_COUNT 2
int i, ret;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct irq_affinity desc = {
+ .pre_vectors = QLA_BASE_VECTORS,
+ };
+
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+ desc.pre_vectors++;
+
+ ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+ ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &desc);
- ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->have_irq = 0;
qentry->in_use = 0;
qentry->handle = NULL;
- qentry->irq_notify.notify = qla_irq_affinity_notify;
- qentry->irq_notify.release = qla_irq_affinity_release;
- qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
+ for (i = 0; i < QLA_BASE_VECTORS; i++) {
qentry = &ha->msix_entries[i];
qentry->handle = rsp;
rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
goto msix_register_fail;
qentry->have_irq = 1;
qentry->in_use = 1;
-
- /* Register for CPU affinity notification. */
- irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
-
- /* Schedule work (ie. trigger a notification) to read cpu
- * mask for this specific irq.
- * kref_get is required because
- * irq_affinity_notify() will do
- * kref_put().
- */
- kref_get(&qentry->irq_notify.kref);
- schedule_work(&qentry->irq_notify.work);
}
/*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
msix->handle = qpair;
return ret;
}
-
-
-/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
-static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct qla_msix_entry *e =
- container_of(notify, struct qla_msix_entry, irq_notify);
- struct qla_hw_data *ha;
- struct scsi_qla_host *base_vha;
- struct rsp_que *rsp = e->handle;
-
- /* user is recommended to set mask to just 1 cpu */
- e->cpuid = cpumask_first(mask);
-
- ha = rsp->hw;
- base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host %ld : vector %d cpu %d \n", __func__,
- base_vha->host_no, e->vector, e->cpuid);
-
- if (e->have_irq) {
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
- (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
- ha->tgt.rspq_vector_cpuid = e->cpuid;
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: rspq vector %d cpu %d runtime change\n",
- __func__, base_vha->host_no, e->vector, e->cpuid);
- }
- }
-}
-
-static void qla_irq_affinity_release(struct kref *ref)
-{
- struct irq_affinity_notify *notify =
- container_of(ref, struct irq_affinity_notify, kref);
- struct qla_msix_entry *e =
- container_of(notify, struct qla_msix_entry, irq_notify);
- struct rsp_que *rsp = e->handle;
- struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
-
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d\n", __func__,
- base_vha->host_no, e->vector, e->cpuid);
-}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2819ceb96041..67f64db390b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
-struct rom_cmd {
+static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
{ MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- /* if PCI error, then avoid mbx processing.*/
- if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+ /* if PCI error, then avoid mbx processing.*/
+ if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x1191,
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
- }
+ }
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
} else {
- uint16_t mb0;
- uint32_t ictrl;
+ uint16_t mb[8];
+ uint32_t ictrl, host_status, hccr;
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+ mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+ mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+ mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+ mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+ mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+ host_status = RD_REG_DWORD(&reg->isp24.host_status);
+ hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+ ql_log(ql_log_warn, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+ command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+ mb[7], host_status, hccr);
+
} else {
- mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+ mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
- "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
- "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
/* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- int configured_count;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
"Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
} else {
- configured_count = mcp->mb[11];
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b434b30..0a1723cc08cf 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+ 0x410000A8, 0x410000AC,
+ 0x410000B8, 0x410000BC
+};
+
static void qla82xx_crb_addr_transform_setup(void)
{
qla82xx_crb_addr_transform(XDMA);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6201dce3553b..77624eac95a4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
- 0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 007192d7bad8..dc1ec9b61027 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -15,6 +15,23 @@
#define TIMEOUT_100_MS 100
+static const uint32_t qla8044_reg_tbl[] = {
+ QLA8044_PEG_HALT_STATUS1,
+ QLA8044_PEG_HALT_STATUS2,
+ QLA8044_PEG_ALIVE_COUNTER,
+ QLA8044_CRB_DRV_ACTIVE,
+ QLA8044_CRB_DEV_STATE,
+ QLA8044_CRB_DRV_STATE,
+ QLA8044_CRB_DRV_SCRATCH,
+ QLA8044_CRB_DEV_PART_INFO1,
+ QLA8044_CRB_IDC_VER_MAJOR,
+ QLA8044_FW_VER_MAJOR,
+ QLA8044_FW_VER_MINOR,
+ QLA8044_FW_VER_SUB,
+ QLA8044_CMDPEG_STATE,
+ QLA8044_ASIC_TEMP,
+};
+
/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 02fe3c4cdf55..83c1b7e17c80 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -535,23 +535,6 @@ enum qla_regs {
#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
#define CRB_CMDPEG_CHECK_DELAY 500
-static const uint32_t qla8044_reg_tbl[] = {
- QLA8044_PEG_HALT_STATUS1,
- QLA8044_PEG_HALT_STATUS2,
- QLA8044_PEG_ALIVE_COUNTER,
- QLA8044_CRB_DRV_ACTIVE,
- QLA8044_CRB_DEV_STATE,
- QLA8044_CRB_DRV_STATE,
- QLA8044_CRB_DRV_SCRATCH,
- QLA8044_CRB_DEV_PART_INFO1,
- QLA8044_CRB_IDC_VER_MAJOR,
- QLA8044_FW_VER_MAJOR,
- QLA8044_FW_VER_MINOR,
- QLA8044_FW_VER_SUB,
- QLA8044_CMDPEG_STATE,
- QLA8044_ASIC_TEMP,
-};
-
/* MiniDump Structures */
/* Driver_code is for driver to write some info about the entry
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8521cfe302e9..0a000ecf0881 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
continue;
rsp = ha->rsp_q_map[cnt];
- clear_bit(cnt, ha->req_qid_map);
+ clear_bit(cnt, ha->rsp_qid_map);
ha->rsp_q_map[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct6_dsd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ctx_cachep)
- goto fail_free_gid_list;
+ goto fail_free_srb_mempool;
}
ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
GFP_KERNEL);
if (!ha->loop_id_map)
- goto fail_async_pd;
+ goto fail_loop_id_map;
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return 0;
+fail_loop_id_map:
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
+
+ if (ha->sns_cmd)
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+ ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
if (IS_QLA82XX(ha) || ql2xenabledif) {
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
- mempool_destroy(ha->ctx_mempool);
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
- mempool_destroy(ha->srb_mempool);
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..e4fda84b959e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
- uint32_t unpacked_lun, lun = 0;
uint16_t loop_id;
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
- struct atio_from_isp *a = (struct atio_from_isp *)iocb;
unsigned long flags;
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
"loop_id %d)\n", vha->host_no, sess, sess->port_name,
mcmd, loop_id);
- lun = a->u.isp24.fcp_cmnd.lun;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
- return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
- iocb, QLA24XX_MGMT_SEND_NACK);
+ return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
#if 0 /* Todo */
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+ if (rc) {
+ }
#endif
goto done;
}
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
if (!vha->flags.online)
return;
- while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+ fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
cnt = pkt->u.raw.entry_count;
- qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
- ha_locked);
+ if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+ /*
+ * This packet is corrupted. The header + payload
+ * can not be trusted. There is no point in passing
+ * it further up.
+ */
+ ql_log(ql_log_warn, vha, 0xffff,
+ "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+ pkt->u.isp24.fcp_hdr.s_id,
+ be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+ le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+ adjust_corrupted_atio(pkt);
+ qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+ } else {
+ qlt_24xx_atio_pkt_all_vps(vha,
+ (struct atio_from_isp *)pkt, ha_locked);
+ }
for (i = 0; i < cnt; i++) {
ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
/* Disable Full Login after LIP */
nv->host_p &= cpu_to_le32(~BIT_10);
+
+ /*
+ * clear BIT 15 explicitly as we have seen at least
+ * a couple of instances where this was set and this
+ * was causing the firmware to not be initialized.
+ */
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
return;
}
- /* out-of-order frames reassembly */
- nv->firmware_options_3 |= BIT_6|BIT_9;
-
if (ha->tgt.enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
-
/* Disable Full Login after LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+ /*
+ * clear BIT 15 explicitly as we have seen at
+ * least a couple of instances where this was set
+ * and this was causing the firmware to not be
+ * initialized.
+ */
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
if (ql2xtgt_tape_enable)
/* Enable FC tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
return;
}
- /* out-of-order frames reassembly */
- nv->firmware_options_3 |= BIT_6|BIT_9;
-
if (ha->tgt.enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f60eedd..0824a8164a24 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@ struct atio_from_isp {
struct {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN 0x38
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
} __packed;
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+ if (atio->entry_type == ATIO_TYPE7 &&
+ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
+ return 1;
+ else
+ return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+ atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+ atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9ed669..8a58ef3adab4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
count++;
}
}
+ } else if (QLA_TGT_MODE_ENABLED() &&
+ ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+ struct qla_hw_data *ha = vha->hw;
+ struct atio *atr = ha->tgt.atio_ring;
+
+ if (atr || !buf) {
+ length = ha->tgt.atio_q_length;
+ qla27xx_insert16(0, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+ count++;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
"%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
count++;
}
}
+ } else if (QLA_TGT_MODE_ENABLED() &&
+ ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+ struct qla_hw_data *ha = vha->hw;
+ struct atio *atr = ha->tgt.atio_ring_ptr;
+
+ if (atr || !buf) {
+ qla27xx_insert16(0, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(ha->tgt.atio_q_in ?
+ readl(ha->tgt.atio_q_in) : 0, buf, len);
+ count++;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02f,
"%s: unknown queue %x\n", __func__, ent->t274.queue_type);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6643f6fc7795..d925910be761 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
{
return sprintf(page,
"TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
- UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
}
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
int ret;
pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
- UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
ret = target_register_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a4823d..cf8430be183b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,7 +1,6 @@
#include <target/target_core_base.h>
#include <linux/btree.h>
-#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c35b6de4ca64..e9e1e141af9c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- sdb->length = blk_rq_bytes(req);
+ sdb->length = blk_rq_payload_bytes(req);
return BLKPREP_OK;
}
@@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
* request queue.
*/
if (q->mq_ops) {
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b1933041da39..1f5d92a25a49 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -871,11 +871,11 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
cmd->allowed = SD_MAX_RETRIES;
/*
- * For WRITE_SAME the data transferred in the DATA IN buffer is
+ * For WRITE SAME the data transferred via the DATA OUT buffer is
* different from the amount of data actually written to the target.
*
- * We set up __data_len to the amount of data transferred from the
- * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
+ * We set up __data_len to the amount of data transferred via the
+ * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
* to transfer a single sector of data first, but then reset it to
* the amount of data to be written right after so that the I/O path
* knows how much to actually write.
@@ -2600,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
@@ -2783,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}
- sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
- q->limits.zoned = BLK_ZONED_HA;
- else if (sdkp->device->type == TYPE_ZBC)
+ if (sdkp->device->type == TYPE_ZBC) {
+ /* Host-managed */
q->limits.zoned = BLK_ZONED_HM;
- else
- q->limits.zoned = BLK_ZONED_NONE;
+ } else {
+ sdkp->zoned = (buffer[8] >> 4) & 3;
+ if (sdkp->zoned == 1)
+ /* Host-aware */
+ q->limits.zoned = BLK_ZONED_HA;
+ else
+ /*
+ * Treat drive-managed devices as
+ * regular block devices.
+ */
+ q->limits.zoned = BLK_ZONED_NONE;
+ }
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8c9a35c91705..50adabbb5808 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+ if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
if (efd.addr) {
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 396b32dca074..7cf70aaec0ba 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_res;
}
@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_dflt_sgl_pool;
}
@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
+ ret = -ENOMEM;
goto err_free_max_sgl_pool;
}
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 0acdfd82e751..813df6e7292d 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -11,6 +11,8 @@
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -92,9 +94,18 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
{ /*sentinel*/ },
};
+struct regmap *exynos_get_pmu_regmap(void)
+{
+ struct device_node *np = of_find_matching_node(NULL,
+ exynos_pmu_of_device_ids);
+ if (np)
+ return syscon_node_to_regmap(np);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct resource *res;
@@ -106,15 +117,10 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context = devm_kzalloc(&pdev->dev,
sizeof(struct exynos_pmu_context),
GFP_KERNEL);
- if (!pmu_context) {
- dev_err(dev, "Cannot allocate memory.\n");
+ if (!pmu_context)
return -ENOMEM;
- }
pmu_context->dev = dev;
-
- match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
-
- pmu_context->pmu_data = match->data;
+ pmu_context->pmu_data = of_device_get_match_data(dev);
if (pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 8823cc81ae45..5bb376009d98 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
if (IS_ERR(task)) {
dev_err(dev, "can't create rproc_boot thread\n");
+ ret = PTR_ERR(task);
goto err_put_rproc;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec4aa252d6e8..2922a9908302 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
+ depends on HAS_DMA
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e89da0af45d2..0314c6b9e044 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct a3700_spi *spi;
u32 num_cs = 0;
- int ret = 0;
+ int irq, ret = 0;
master = spi_alloc_master(dev, sizeof(*spi));
if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL |
+ master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- spi->irq = platform_get_irq(pdev, 0);
- if (spi->irq < 0) {
- dev_err(dev, "could not get irq: %d\n", spi->irq);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "could not get irq: %d\n", irq);
ret = -ENXIO;
goto error;
}
+ spi->irq = irq;
init_completion(&spi->done);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 319225d7e761..6ab4c7700228 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_master;
}
spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b73a35..02fb96797ac8 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e31971f91475..837cb8d0bac6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
static void mid_spi_dma_stop(struct dw_spi *dws)
{
if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_all(dws->txchan);
+ dmaengine_terminate_sync(dws->txchan);
clear_bit(TX_BUSY, &dws->dma_chan_busy);
}
if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_all(dws->rxchan);
+ dmaengine_terminate_sync(dws->rxchan);
clear_bit(RX_BUSY, &dws->dma_chan_busy);
}
}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b715a26a9148..054012f87567 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
- dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+ char name[128];
+
+ snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+ dws->debugfs = debugfs_create_dir(name, NULL);
if (!dws->debugfs)
return -ENOMEM;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dd7b5b47291d..d6239fa718be 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
default:
tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0012ad02e569..1f00eeb0b5a3 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
};
static const struct of_device_id sh_msiof_match[] = {
- { .compatible = "renesas,sh-msiof", .data = &sh_data },
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
+ { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
+ { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
+ { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 250caa00de5e..51384bdde450 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -474,17 +474,20 @@ static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}
-static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
+static int gb_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
- u16 usec;
+ u32 debounce;
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
if (debounce > U16_MAX)
return -EINVAL;
- usec = (u16)debounce;
- return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
+ return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}
static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
@@ -689,7 +692,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
gpio->direction_output = gb_gpio_direction_output;
gpio->get = gb_gpio_get;
gpio->set = gb_gpio_set;
- gpio->set_debounce = gb_gpio_set_debounce;
+ gpio->set_config = gb_gpio_set_config;
gpio->to_irq = gb_gpio_to_irq;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 8130dfe89745..4971aa54756a 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -770,6 +770,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
/* Initialize the device private structure. */
struct octeon_ethernet *priv = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
@@ -816,6 +817,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
}
/* Initialize the device private structure. */
+ SET_NETDEV_DEV(dev, &pdev->dev);
priv = netdev_priv(dev);
priv->netdev = dev;
priv->of_node = cvm_oct_node_for_port(pip, interface,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd66df93..1cadc9eefa21 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+ case TCM_TOO_MANY_TARGET_DESCS:
+ case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+ case TCM_TOO_MANY_SEGMENT_DESCS:
+ case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
},
+ [TCM_TOO_MANY_TARGET_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+ },
+ [TCM_TOO_MANY_SEGMENT_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+ },
[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
.key = ILLEGAL_REQUEST,
.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 37d5caebffa6..d828b3b5000b 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
- bool src)
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+ struct se_device **found_dev)
{
struct se_device *se_dev;
- unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (src)
- dev_wwn = &xop->dst_tid_wwn[0];
- else
- dev_wwn = &xop->src_tid_wwn[0];
-
mutex_lock(&g_device_mutex);
list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
@@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
if (rc != 0)
continue;
- if (src) {
- xop->dst_dev = se_dev;
- pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
- " se_dev\n", xop->dst_dev);
- } else {
- xop->src_dev = se_dev;
- pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
- " se_dev\n", xop->src_dev);
- }
+ *found_dev = se_dev;
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
rc = target_depend_item(&se_dev->dev_group.cg_item);
if (rc != 0) {
@@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
- unsigned char *p, bool src)
+ unsigned char *p, unsigned short cscd_index)
{
unsigned char *desc = p;
unsigned short ript;
@@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
return -EINVAL;
}
- if (src) {
+ if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
+ pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
+ "dest\n", cscd_index);
+ return 0;
+ }
+
+ if (cscd_index == xop->stdi) {
memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
* Determine if the source designator matches the local device
@@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
" received xop\n", xop->src_dev);
}
- } else {
+ }
+
+ if (cscd_index == xop->dtdi) {
memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
- * Determine if the destination designator matches the local device
+ * Determine if the destination designator matches the local
+ * device. If @cscd_index corresponds to both source (stdi) and
+ * destination (dtdi), or dtdi comes after stdi, then
+ * XCOL_DEST_RECV_OP wins.
*/
if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
XCOPY_NAA_IEEE_REGEX_LEN)) {
@@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
- int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
+ unsigned short cscd_index = 0;
unsigned short start = 0;
- bool src = true;
*sense_ret = TCM_INVALID_PARAMETER_LIST;
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
return -EINVAL;
}
- if (tdll > 64) {
+ if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
pr_err("XCOPY target descriptor supports a maximum"
" two src/dest descriptors, tdll: %hu too large..\n", tdll);
+ /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
return -EINVAL;
}
/*
@@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
while (start < tdll) {
/*
- * Check target descriptor identification with 0xE4 type with
- * use VPD 0x83 WWPN matching ..
+ * Check target descriptor identification with 0xE4 type, and
+ * compare the current index with the CSCD descriptor IDs in
+ * the segment descriptor. Use VPD 0x83 WWPN matching ..
*/
switch (desc[0]) {
case 0xe4:
rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
- &desc[0], src);
+ &desc[0], cscd_index);
if (rc != 0)
goto out;
- /*
- * Assume target descriptors are in source -> destination order..
- */
- if (src)
- src = false;
- else
- src = true;
start += XCOPY_TARGET_DESC_LEN;
desc += XCOPY_TARGET_DESC_LEN;
- ret++;
+ cscd_index++;
break;
default:
pr_err("XCOPY unsupported descriptor type code:"
" 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
goto out;
}
}
- if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
- else
- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+ switch (xop->op_origin) {
+ case XCOL_SOURCE_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
+ &xop->dst_dev);
+ break;
+ case XCOL_DEST_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
+ &xop->src_dev);
+ break;
+ default:
+ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+ "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
+ rc = -EINVAL;
+ break;
+ }
/*
* If a matching IEEE NAA 0x83 descriptor for the requested device
* is not located on this node, return COPY_ABORTED with ASQ/ASQC
@@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->dst_dev, &xop->dst_tid_wwn[0]);
- return ret;
+ return cscd_index;
out:
return -EINVAL;
@@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
xop->stdi = get_unaligned_be16(&desc[4]);
xop->dtdi = get_unaligned_be16(&desc[6]);
+
+ if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
+ xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
+ pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
+ XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
+ return -EINVAL;
+ }
+
pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
desc_len, xop->stdi, xop->dtdi, dc);
@@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned int sdll)
+ unsigned int sdll, sense_reason_t *sense_ret)
{
unsigned char *desc = p;
unsigned int start = 0;
int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY segment descriptor list length is not"
" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+ return -EINVAL;
+ }
+ if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
+ pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
+ " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
+ /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
return -EINVAL;
}
@@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
default:
pr_err("XCOPY unsupported segment descriptor"
"type: 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
goto out;
}
}
@@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
+ if (se_cmd->data_length == 0) {
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+ return TCM_NO_SENSE;
+ }
+ if (se_cmd->data_length < XCOPY_HDR_LEN) {
+ pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+ se_cmd->data_length, XCOPY_HDR_LEN);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
if (!xop) {
pr_err("Unable to allocate xcopy_op\n");
@@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
*/
tdll = get_unaligned_be16(&p[2]);
sdll = get_unaligned_be32(&p[8]);
+ if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
+ pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
+ tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
inline_dl = get_unaligned_be32(&p[12]);
if (inline_dl != 0) {
@@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
goto out;
}
+ if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
+ pr_err("XCOPY parameter truncation: data length %u too small "
+ "for tdll: %hu sdll: %u inline_dl: %u\n",
+ se_cmd->data_length, tdll, sdll, inline_dl);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
+ /*
+ * Skip over the target descriptors until the segment descriptors
+ * have been parsed - CSCD IDs are needed to determine src and dest.
+ */
+ seg_desc = &p[16] + tdll;
+
+ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
+ sdll, &ret);
+ if (rc <= 0)
+ goto out;
+
+ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+ rc * XCOPY_SEGMENT_DESC_LEN);
+
rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
@@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
- seg_desc = &p[16];
- seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
-
- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
- if (rc <= 0) {
- xcopy_pt_undepend_remotedev(xop);
- goto out;
- }
transport_kunmap_data_sg(se_cmd);
- pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
- rc * XCOPY_SEGMENT_DESC_LEN);
INIT_WORK(&xop->xop_work, target_xcopy_do_work);
queue_work(xcopy_wq, &xop->xop_work);
return TCM_NO_SENSE;
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 4d3d4dd060f2..7c0b105cbe1b 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,10 +1,17 @@
#include <target/target_core_base.h>
+#define XCOPY_HDR_LEN 16
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
#define XCOPY_MAX_SECTORS 1024
+/*
+ * SPC4r37 6.4.6.1
+ * Table 150 — CSCD descriptor ID values
+ */
+#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF
+
enum xcopy_origin_list {
XCOL_SOURCE_RECV_OP = 0x01,
XCOL_DEST_RECV_OP = 0x02,
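The xcopy changes above stop assuming a fixed source-then-destination ordering of target descriptors and instead match each CSCD list index against the stdi/dtdi IDs carried in the segment descriptor. A minimal sketch of that matching, assuming a pared-down user-space representation (the types and the demo values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF  /* SPC4r37, table 150 */

enum cscd_role { CSCD_IGNORE, CSCD_SRC, CSCD_DST, CSCD_BOTH };

/*
 * Classify the CSCD entry at @index against the segment descriptor's
 * source (stdi) and destination (dtdi) IDs. Entries matching neither
 * are skipped, mirroring target_xcopy_parse_tiddesc_e4().
 */
static int cscd_role(uint16_t stdi, uint16_t dtdi, uint16_t index)
{
        if (stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
            dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX)
                return -1;                      /* reject out-of-range IDs */
        if (index == stdi && index == dtdi)
                return CSCD_BOTH;
        if (index == stdi)
                return CSCD_SRC;
        if (index == dtdi)
                return CSCD_DST;
        return CSCD_IGNORE;
}

int main(void)
{
        printf("index 0 with stdi=0 dtdi=1 -> %d\n", cscd_role(0, 1, 0));
        printf("index 5 with stdi=0 dtdi=1 -> %d\n", cscd_role(0, 1, 5));
        return 0;
}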
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b811b0fb61b1..4c7796512453 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
void (*control)(void __iomem *reg, bool on);
/* Per-sensor methods */
- int (*get_temp)(struct chip_tsadc_table table,
+ int (*get_temp)(const struct chip_tsadc_table *table,
int chn, void __iomem *reg, int *temp);
- void (*set_alarm_temp)(struct chip_tsadc_table table,
- int chn, void __iomem *reg, int temp);
- void (*set_tshut_temp)(struct chip_tsadc_table table,
- int chn, void __iomem *reg, int temp);
+ int (*set_alarm_temp)(const struct chip_tsadc_table *table,
+ int chn, void __iomem *reg, int temp);
+ int (*set_tshut_temp)(const struct chip_tsadc_table *table,
+ int chn, void __iomem *reg, int temp);
void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
/* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
{3452, 115000},
{3437, 120000},
{3421, 125000},
+ {0, 125000},
};
static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
{TSADCV3_DATA_MASK, 125000},
};
-static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
int temp)
{
int high, low, mid;
- u32 error = 0;
+ unsigned long num;
+ unsigned int denom;
+ u32 error = table->data_mask;
low = 0;
- high = table.length - 1;
+ high = (table->length - 1) - 1; /* ignore the terminating table entry */
mid = (high + low) / 2;
/* Return mask code data when the temp is over table range */
- if (temp < table.id[low].temp || temp > table.id[high].temp) {
- error = table.data_mask;
+ if (temp < table->id[low].temp || temp > table->id[high].temp)
goto exit;
- }
while (low <= high) {
- if (temp == table.id[mid].temp)
- return table.id[mid].code;
- else if (temp < table.id[mid].temp)
+ if (temp == table->id[mid].temp)
+ return table->id[mid].code;
+ else if (temp < table->id[mid].temp)
high = mid - 1;
else
low = mid + 1;
mid = (low + high) / 2;
}
+ /*
+ * The conversion code granularity provided by the table. Let's
+ * assume that the relationship between temperature and
+ * analog value between 2 table entries is linear and interpolate
+ * to produce less granular result.
+ */
+ num = abs(table->id[mid + 1].code - table->id[mid].code);
+ num *= temp - table->id[mid].temp;
+ denom = table->id[mid + 1].temp - table->id[mid].temp;
+
+ switch (table->mode) {
+ case ADC_DECREMENT:
+ return table->id[mid].code - (num / denom);
+ case ADC_INCREMENT:
+ return table->id[mid].code + (num / denom);
+ default:
+ pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+ return error;
+ }
+
exit:
- pr_err("Invalid the conversion, error=%d\n", error);
+ pr_err("%s: invalid temperature, temp=%d error=%d\n",
+ __func__, temp, error);
return error;
}
-static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
- int *temp)
+static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
+ u32 code, int *temp)
{
unsigned int low = 1;
- unsigned int high = table.length - 1;
+ unsigned int high = table->length - 1;
unsigned int mid = (low + high) / 2;
unsigned int num;
unsigned long denom;
- WARN_ON(table.length < 2);
+ WARN_ON(table->length < 2);
- switch (table.mode) {
+ switch (table->mode) {
case ADC_DECREMENT:
- code &= table.data_mask;
- if (code < table.id[high].code)
+ code &= table->data_mask;
+ if (code <= table->id[high].code)
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code >= table.id[mid].code &&
- code < table.id[mid - 1].code)
+ if (code >= table->id[mid].code &&
+ code < table->id[mid - 1].code)
break;
- else if (code < table.id[mid].code)
+ else if (code < table->id[mid].code)
low = mid + 1;
else
high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
}
break;
case ADC_INCREMENT:
- code &= table.data_mask;
- if (code < table.id[low].code)
+ code &= table->data_mask;
+ if (code < table->id[low].code)
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code <= table.id[mid].code &&
- code > table.id[mid - 1].code)
+ if (code <= table->id[mid].code &&
+ code > table->id[mid - 1].code)
break;
- else if (code > table.id[mid].code)
+ else if (code > table->id[mid].code)
low = mid + 1;
else
high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
}
break;
default:
- pr_err("Invalid the conversion table\n");
+ pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+ return -EINVAL;
}
/*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
* temperature between 2 table entries is linear and interpolate
* to produce less granular result.
*/
- num = table.id[mid].temp - table.id[mid - 1].temp;
- num *= abs(table.id[mid - 1].code - code);
- denom = abs(table.id[mid - 1].code - table.id[mid].code);
- *temp = table.id[mid - 1].temp + (num / denom);
+ num = table->id[mid].temp - table->id[mid - 1].temp;
+ num *= abs(table->id[mid - 1].code - code);
+ denom = abs(table->id[mid - 1].code - table->id[mid].code);
+ *temp = table->id[mid - 1].temp + (num / denom);
return 0;
}
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}
-static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
int chn, void __iomem *regs, int *temp)
{
u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
return rk_tsadcv2_code_to_temp(table, val, temp);
}
-static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
- int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
+ int chn, void __iomem *regs, int temp)
{
- u32 alarm_value, int_en;
+ u32 alarm_value;
+ u32 int_en, int_clr;
+
+ /*
+ * In some cases a sensor does not need trip points; set_trips()
+ * will then pass {-INT_MAX, INT_MAX} as the alarm window. Ignore
+ * that case and disable the high temperature interrupt.
+ */
+ if (temp == INT_MAX) {
+ int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
+ int_clr &= ~TSADCV2_INT_SRC_EN(chn);
+ writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
+ return 0;
+ }
/* Make sure the value is valid */
alarm_value = rk_tsadcv2_temp_to_code(table, temp);
- if (alarm_value == table.data_mask)
- return;
+ if (alarm_value == table->data_mask)
+ return -ERANGE;
- writel_relaxed(alarm_value & table.data_mask,
+ writel_relaxed(alarm_value & table->data_mask,
regs + TSADCV2_COMP_INT(chn));
int_en = readl_relaxed(regs + TSADCV2_INT_EN);
int_en |= TSADCV2_INT_SRC_EN(chn);
writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+
+ return 0;
}
-static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
- int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
+ int chn, void __iomem *regs, int temp)
{
u32 tshut_value, val;
/* Make sure the value is valid */
tshut_value = rk_tsadcv2_temp_to_code(table, temp);
- if (tshut_value == table.data_mask)
- return;
+ if (tshut_value == table->data_mask)
+ return -ERANGE;
writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
/* TSHUT will be valid */
val = readl_relaxed(regs + TSADCV2_AUTO_CON);
writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+
+ return 0;
}
static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
__func__, sensor->id, low, high);
- tsadc->set_alarm_temp(tsadc->table,
- sensor->id, thermal->regs, high);
-
- return 0;
+ return tsadc->set_alarm_temp(&tsadc->table,
+ sensor->id, thermal->regs, high);
}
static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
int retval;
- retval = tsadc->get_temp(tsadc->table,
+ retval = tsadc->get_temp(&tsadc->table,
sensor->id, thermal->regs, out_temp);
dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
int error;
tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
- tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+
+ error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
thermal->tshut_temp);
+ if (error)
+ dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, thermal->tshut_temp, error);
sensor->thermal = thermal;
sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
thermal->chip->set_tshut_mode(id, thermal->regs,
thermal->tshut_mode);
- thermal->chip->set_tshut_temp(thermal->chip->table,
+
+ error = thermal->chip->set_tshut_temp(&thermal->chip->table,
id, thermal->regs,
thermal->tshut_temp);
+ if (error)
+ dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, thermal->tshut_temp, error);
}
thermal->chip->control(thermal->regs, true);
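The rockchip conversion routines above bracket a raw ADC code between two table entries and interpolate linearly; the reworked temp_to_code path now does the same in the other direction. A standalone sketch of that interpolation, with a made-up, strictly decreasing code table:

#include <stdio.h>

struct tsadc_entry { unsigned int code; int temp_mc; };  /* millicelsius */

/* Hypothetical, strictly decreasing code table (ADC_DECREMENT style). */
static const struct tsadc_entry table[] = {
        {3800, -40000},
        {3600,  25000},
        {3450,  85000},
        {3400, 125000},
};

/*
 * Find the pair of entries that brackets @code and interpolate linearly
 * between them, the same scheme rk_tsadcv2_code_to_temp() uses.
 */
static int code_to_temp(unsigned int code, int *temp_mc)
{
        unsigned int i;

        for (i = 1; i < sizeof(table) / sizeof(table[0]); i++) {
                if (code <= table[i - 1].code && code > table[i].code) {
                        long num = table[i].temp_mc - table[i - 1].temp_mc;

                        num *= (long)(table[i - 1].code - code);
                        *temp_mc = table[i - 1].temp_mc +
                                   num / (long)(table[i - 1].code - table[i].code);
                        return 0;
                }
        }
        return -1;      /* code outside the table range */
}

int main(void)
{
        int temp;

        if (!code_to_temp(3500, &temp))
                printf("code 3500 -> %d.%03d C\n", temp / 1000, temp % 1000);
        return 0;
}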
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 641faab6e24b..655591316a88 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
if (!strncmp(dev_name(dev), "thermal_zone",
sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ kfree(tz->trip_hyst_attrs);
+ kfree(tz->trips_attribute_group.attrs);
+ kfree(tz->device.groups);
kfree(tz);
} else if (!strncmp(dev_name(dev), "cooling_device",
sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, 0);
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- kfree(tz->trip_hyst_attrs);
- kfree(tz->trips_attribute_group.attrs);
thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
idr_destroy(&tz->idr);
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
- kfree(tz->device.groups);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
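The thermal_core change moves the attribute-array kfree()s from thermal_zone_device_unregister() into the release() callback, so nothing is freed while another reference to the zone may still be live. A rough sketch of that lifetime rule with a hand-rolled refcount (the names here are illustrative, not the driver-core API):

#include <stdlib.h>

struct zone {
        int refs;
        char *trip_attrs;       /* stands in for the attribute arrays */
};

/*
 * Free everything only when the last reference drops, never at
 * unregister time while readers may still hold a reference.
 */
static void zone_put(struct zone *z)
{
        if (--z->refs)
                return;
        free(z->trip_attrs);    /* what thermal_release() now does */
        free(z);
}

int main(void)
{
        struct zone *z = calloc(1, sizeof(*z));

        if (!z)
                return 1;
        z->refs = 2;            /* creator + an imaginary sysfs reader */
        z->trip_attrs = malloc(32);

        zone_put(z);            /* "unregister": reader still holds a ref */
        zone_put(z);            /* last put actually frees */
        return 0;
}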
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61569a765d9e..76e03a7de9cc 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
.device = uart_console_device,
.setup = univ8250_console_setup,
.match = univ8250_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index aa0166b6d450..116436b7fa52 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
- const struct pciserial_board *board;
+ struct serial_private *new;
if (!priv)
return;
- board = priv->board;
- kfree(priv);
- priv = pciserial_init_ports(dev, board);
-
- if (!IS_ERR(priv)) {
- pci_set_drvdata(dev, priv);
+ new = pciserial_init_ports(dev, priv->board);
+ if (!IS_ERR(new)) {
+ pci_set_drvdata(dev, new);
+ kfree(priv);
}
}
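The serial8250_io_resume() fix builds the replacement state before releasing the old one, so the old board description is still valid as input and nothing is lost if the rebuild fails. A small sketch of that ordering with a stand-in state struct (not the real pciserial API):

#include <stdlib.h>

struct board_state { int nr_ports; };

/*
 * Rebuild @*cur from the information held in the old state, and only free
 * the old state once the replacement exists - the same ordering the
 * io_resume() fix uses around pciserial_init_ports().
 */
static int rebuild_state(struct board_state **cur)
{
        struct board_state *fresh = malloc(sizeof(*fresh));

        if (!fresh)
                return -1;              /* keep the old state on failure */
        fresh->nr_ports = (*cur)->nr_ports;
        free(*cur);
        *cur = fresh;
        return 0;
}

int main(void)
{
        struct board_state *s = malloc(sizeof(*s));

        if (!s)
                return 1;
        s->nr_ports = 4;
        return rebuild_state(&s) ? 1 : 0;
}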
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fe4399b41df6..c13fec451d03 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
* Enable previously disabled RX interrupts.
*/
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
- serial8250_clear_fifos(p);
+ serial8250_clear_and_reinit_fifos(p);
p->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_port_out(&p->port, UART_IER, p->ier);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 168b10cad47b..fabbe76203bb 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port)
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
}
+
+ /*
+ * Disable the transmitter.
+ * This is mandatory when DMA is used, otherwise the DMA buffer
+ * This is mandatory when DMA is used; otherwise the remaining DMA
+ * buffer would still be transmitted.
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
@@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port)
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+
+ /* re-enable the transmitter */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
}
/*
@@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
+ else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port)
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
-
- } else {
- if (port->rs485.flags & SER_RS485_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
- }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 52bbd27e93ae..701c085bb19b 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
- .evbit = { BIT_MASK(EV_KEY) },
- .keybit = { BIT_MASK(KEY_LEFTALT) },
+ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
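The sysrq change adds explicit [BIT_WORD(...)] indices so the mask lands in the word of the bitmap that the code actually belongs to; a plain positional initializer always writes word 0, which is wrong for KEY_LEFTALT on a 32-bit build. A standalone sketch of the difference (constants reproduced here only for illustration):

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

#define KEY_LEFTALT     56
#define KEY_MAX         0x2ff

int main(void)
{
        /*
         * A positional initializer { BIT_MASK(KEY_LEFTALT) } always writes
         * word 0; the designated form puts the mask in the word the key
         * code actually belongs to (word 1 when BITS_PER_LONG is 32).
         */
        unsigned long keybit[KEY_MAX / BITS_PER_LONG + 1] = {
                [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT),
        };

        printf("KEY_LEFTALT lives in word %zu, mask %#lx\n",
               (size_t)BIT_WORD(KEY_LEFTALT),
               keybit[BIT_WORD(KEY_LEFTALT)]);
        return 0;
}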
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0aa9e7d697a5..25dbd8c7aec7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -239,6 +239,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
+ /* Check for duplicate endpoint addresses */
+ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
+ if (ifp->endpoint[i].desc.bEndpointAddress ==
+ d->bEndpointAddress) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
+ }
+
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1fa5c0f29c64..a56c75e09786 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -103,8 +103,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
-static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
- struct usb_port *port_dev);
+static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@@ -903,34 +902,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
}
/*
- * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
- * a connection with a plugged-in cable but will signal the host when the cable
- * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
- */
-static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
-{
- struct usb_port *port_dev = hub->ports[port1 - 1];
- struct usb_device *hdev = hub->hdev;
- int ret = 0;
-
- if (!hub->error) {
- if (hub_is_superspeed(hub->hdev)) {
- hub_usb3_port_prepare_disable(hub, port_dev);
- ret = hub_set_port_link_state(hub, port_dev->portnum,
- USB_SS_PORT_LS_U3);
- } else {
- ret = usb_clear_port_feature(hdev, port1,
- USB_PORT_FEAT_ENABLE);
- }
- }
- if (port_dev->child && set_state)
- usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
- if (ret && ret != -ENODEV)
- dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
- return ret;
-}
-
-/*
* Disable a port and mark a logical connect-change event, so that some
* time later hub_wq will disconnect() any existing usb_device on the port
* and will re-enumerate if there actually is a device attached.
@@ -4162,6 +4133,34 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
#endif /* CONFIG_PM */
+/*
+ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
+ * a connection with a plugged-in cable but will signal the host when the cable
+ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
+ */
+static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+{
+ struct usb_port *port_dev = hub->ports[port1 - 1];
+ struct usb_device *hdev = hub->hdev;
+ int ret = 0;
+
+ if (!hub->error) {
+ if (hub_is_superspeed(hub->hdev)) {
+ hub_usb3_port_prepare_disable(hub, port_dev);
+ ret = hub_set_port_link_state(hub, port_dev->portnum,
+ USB_SS_PORT_LS_U3);
+ } else {
+ ret = usb_clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_ENABLE);
+ }
+ }
+ if (port_dev->child && set_state)
+ usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+ if (ret && ret != -ENODEV)
+ dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
+ return ret;
+}
+
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9548d3e03453..302b8f5f7d27 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -513,8 +513,8 @@ struct dwc2_core_params {
/* Gadget parameters */
bool g_dma;
bool g_dma_desc;
- u16 g_rx_fifo_size;
- u16 g_np_tx_fifo_size;
+ u32 g_rx_fifo_size;
+ u32 g_np_tx_fifo_size;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index b95930f20d90..77c5fcf3a5bf 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
(hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,11 +3749,11 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
__func__, epctrl, epctrl_reg);
/* Allocate DMA descriptor chain for non-ctrl endpoints */
- if (using_desc_dma(hsotg)) {
- hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+ if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+ hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
- &hs_ep->desc_list_dma, GFP_KERNEL);
+ &hs_ep->desc_list_dma, GFP_ATOMIC);
if (!hs_ep->desc_list) {
ret = -ENOMEM;
goto error2;
@@ -3872,7 +3872,7 @@ error1:
error2:
if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
- dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
hs_ep->desc_list, hs_ep->desc_list_dma);
hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
- /* Remove DMA memory allocated for non-control Endpoints */
- if (using_desc_dma(hsotg)) {
- dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
- sizeof(struct dwc2_dma_desc),
- hs_ep->desc_list, hs_ep->desc_list_dma);
- hs_ep->desc_list = NULL;
- }
-
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
/* set the PLL on, remove the HNP/SRP and set the PHY */
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 911c3b36ac06..46d0ad5105e4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
+ if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ goto unlock;
+
if (!hsotg->params.hibernation)
goto skip_power_saving;
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
{
#ifdef VERBOSE_DEBUG
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- char *pipetype;
- char *speed;
+ char *pipetype = NULL;
+ char *speed = NULL;
dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
dev_vdbg(hsotg->dev, " Device address: %d\n",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a786256535b6..bcd1e19b4076 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -247,8 +247,6 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
char *property, u8 size, u64 *value)
{
- u8 val8;
- u16 val16;
u32 val32;
switch (size) {
@@ -256,17 +254,7 @@ static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
*value = device_property_read_bool(hsotg->dev, property);
break;
case 1:
- if (device_property_read_u8(hsotg->dev, property, &val8))
- return;
-
- *value = val8;
- break;
case 2:
- if (device_property_read_u16(hsotg->dev, property, &val16))
- return;
-
- *value = val16;
- break;
case 4:
if (device_property_read_u32(hsotg->dev, property, &val32))
return;
@@ -397,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
}
/**
- * dwc2_set_param_u16() - Set a u16 parameter
+ * dwc2_set_param_u32() - Set a u32 parameter
*
* See dwc2_set_param().
*/
-static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
bool lookup, char *property, u16 legacy,
u16 def, u16 min, u16 max)
{
dwc2_set_param(hsotg, param, lookup, property,
- legacy, def, min, max, 2);
+ legacy, def, min, max, 4);
}
/**
@@ -1100,13 +1088,13 @@ static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
/* Buffer DMA */
dwc2_set_param_bool(hsotg, &p->g_dma,
false, "gadget-dma",
- true, false,
+ dma_capable, false,
dma_capable);
/* DMA Descriptor */
dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
"gadget-dma-desc",
- p->g_dma, false,
+ !!hw->dma_desc_enable, false,
!!hw->dma_desc_enable);
}
@@ -1130,8 +1118,14 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
dwc2_set_param_bool(hsotg, &p->host_dma,
false, "host-dma",
- true, false,
+ dma_capable, false,
dma_capable);
+ dwc2_set_param_host_rx_fifo_size(hsotg,
+ params->host_rx_fifo_size);
+ dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+ params->host_nperio_tx_fifo_size);
+ dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+ params->host_perio_tx_fifo_size);
}
dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
@@ -1140,12 +1134,6 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
params->host_support_fs_ls_low_power);
dwc2_set_param_enable_dynamic_fifo(hsotg,
params->enable_dynamic_fifo);
- dwc2_set_param_host_rx_fifo_size(hsotg,
- params->host_rx_fifo_size);
- dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
- params->host_nperio_tx_fifo_size);
- dwc2_set_param_host_perio_tx_fifo_size(hsotg,
- params->host_perio_tx_fifo_size);
dwc2_set_param_max_transfer_size(hsotg,
params->max_transfer_size);
dwc2_set_param_max_packet_count(hsotg,
@@ -1190,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
* auto-detect if the hardware does not support the
* default.
*/
- dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+ dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
true, "g-rx-fifo-size", 2048,
hw->rx_fifo_size,
16, hw->rx_fifo_size);
- dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+ dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
true, "g-np-tx-fifo-size", 1024,
hw->dev_nperio_tx_fifo_size,
16, hw->dev_nperio_tx_fifo_size);
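The dwc2 change widens g_rx_fifo_size and g_np_tx_fifo_size from u16 to u32: a FIFO size read from hardware that does not fit in 16 bits would otherwise be silently truncated before the range check. A trivial demonstration (the 0x10000 value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hw_rx_fifo = 0x10000;  /* 65536 words, hypothetical */
        uint16_t as_u16 = hw_rx_fifo;   /* silently truncates to 0 */
        uint32_t as_u32 = hw_rx_fifo;   /* value preserved */

        printf("u16 copy: %u, u32 copy: %u\n", as_u16, as_u32);
        return 0;
}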
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index de5a8570be04..14b760209680 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -45,9 +45,7 @@
#define DWC3_XHCI_RESOURCES_NUM 2
#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */
-#define DWC3_EVENT_SIZE 4 /* bytes */
-#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */
-#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
+#define DWC3_EVENT_BUFFERS_SIZE 4096
#define DWC3_EVENT_TYPE_MASK 0xfe
#define DWC3_EVENT_TYPE_DEV 0
@@ -311,9 +309,8 @@
#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DCFG_SUPERSPEED (4 << 0)
#define DWC3_DCFG_HIGHSPEED (0 << 0)
-#define DWC3_DCFG_FULLSPEED2 (1 << 0)
+#define DWC3_DCFG_FULLSPEED (1 << 0)
#define DWC3_DCFG_LOWSPEED (2 << 0)
-#define DWC3_DCFG_FULLSPEED1 (3 << 0)
#define DWC3_DCFG_NUMP_SHIFT 17
#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
@@ -405,9 +402,8 @@
#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DSTS_SUPERSPEED (4 << 0)
#define DWC3_DSTS_HIGHSPEED (0 << 0)
-#define DWC3_DSTS_FULLSPEED2 (1 << 0)
+#define DWC3_DSTS_FULLSPEED (1 << 0)
#define DWC3_DSTS_LOWSPEED (2 << 0)
-#define DWC3_DSTS_FULLSPEED1 (3 << 0)
/* Device Generic Command Register */
#define DWC3_DGCMD_SET_LMP 0x01
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e27899bb5706..e956306d9b0f 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto axius_clk_err;
}
clk_prepare_enable(exynos->axius_clk);
} else {
@@ -196,6 +197,7 @@ err3:
regulator_disable(exynos->vdd33);
err2:
clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
return ret;
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 29e80cc9b634..eb1b9cb3f9d1 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dwc3-omap.h>
@@ -510,7 +511,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
-
+ irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
dwc3_omap_interrupt_thread, IRQF_SHARED,
"dwc3-omap", omap);
@@ -531,7 +532,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
}
dwc3_omap_enable_irqs(omap);
-
+ enable_irq(omap->irq);
return 0;
err2:
@@ -552,6 +553,7 @@ static int dwc3_omap_remove(struct platform_device *pdev)
extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
dwc3_omap_disable_irqs(omap);
+ disable_irq(omap->irq);
of_platform_depopulate(omap->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2b73339f286b..cce0a220b6b0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -38,6 +38,7 @@
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -73,16 +74,6 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
struct platform_device *dwc3 = dwc->dwc3;
struct pci_dev *pdev = dwc->pci;
- int ret;
-
- struct property_entry sysdev_property[] = {
- PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
- { },
- };
-
- ret = platform_device_add_properties(dwc3, sysdev_property);
- if (ret)
- return ret;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
@@ -105,6 +96,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
@@ -115,7 +107,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
int ret;
struct property_entry properties[] = {
- PROPERTY_ENTRY_STRING("dr-mode", "peripheral"),
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ }
};
@@ -167,6 +160,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{ },
};
@@ -274,6 +268,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 4878d187c7d4..9bb1f8526f3e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,18 +39,13 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
-static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
- u32 len, u32 type, bool chain)
+static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
+ dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
- struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
struct dwc3_ep *dep;
- int ret;
-
dep = dwc->eps[epnum];
- if (dep->flags & DWC3_EP_BUSY)
- return 0;
trb = &dwc->ep0_trb[dep->trb_enqueue];
@@ -71,15 +66,23 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trb->ctrl |= (DWC3_TRB_CTRL_IOC
| DWC3_TRB_CTRL_LST);
- if (chain)
+ trace_dwc3_prepare_trb(dep, trb);
+}
+
+static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum)
+{
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
+ int ret;
+
+ dep = dwc->eps[epnum];
+ if (dep->flags & DWC3_EP_BUSY)
return 0;
memset(&params, 0, sizeof(params));
params.param0 = upper_32_bits(dwc->ep0_trb_addr);
params.param1 = lower_32_bits(dwc->ep0_trb_addr);
- trace_dwc3_prepare_trb(dep, trb);
-
ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
if (ret < 0)
return ret;
@@ -280,8 +283,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
complete(&dwc->ep0_in_setup);
- ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
+ dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
+ ret = dwc3_ep0_start_trans(dwc, 0);
WARN_ON(ret < 0);
}
@@ -912,9 +916,9 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
dwc->ep0_next_event = DWC3_EP0_COMPLETE;
- ret = dwc3_ep0_start_trans(dwc, epnum,
- dwc->ctrl_req_addr, 0,
- DWC3_TRBCTL_CONTROL_DATA, false);
+ dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
+ 0, DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dwc, epnum);
WARN_ON(ret < 0);
}
}
@@ -993,9 +997,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
req->direction = !!dep->number;
if (req->request.length == 0) {
- ret = dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
dwc->ctrl_req_addr, 0,
DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dwc, dep->number);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
&& (dep->number == 0)) {
u32 transfer_size = 0;
@@ -1011,7 +1016,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
transfer_size = ALIGN(req->request.length - maxpacket,
maxpacket);
- ret = dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
req->request.dma,
transfer_size,
DWC3_TRBCTL_CONTROL_DATA,
@@ -1023,18 +1028,20 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
dwc->ep0_bounced = true;
- ret = dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
dwc->ep0_bounce_addr, transfer_size,
DWC3_TRBCTL_CONTROL_DATA, false);
+ ret = dwc3_ep0_start_trans(dwc, dep->number);
} else {
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
if (ret)
return;
- ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
false);
+ ret = dwc3_ep0_start_trans(dwc, dep->number);
}
WARN_ON(ret < 0);
@@ -1048,8 +1055,9 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- return dwc3_ep0_start_trans(dwc, dep->number,
+ dwc3_ep0_prepare_one_trb(dwc, dep->number,
dwc->ctrl_req_addr, 0, type, false);
+ return dwc3_ep0_start_trans(dwc, dep->number);
}
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
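The ep0 rework above separates TRB preparation (dwc3_ep0_prepare_one_trb) from issuing the Start Transfer command, so several TRBs can be queued before the hardware is kicked once. A toy sketch of that split with a fake ring and no real hardware access (all names and values here are made up):

#include <stdio.h>

#define RING_SIZE 4

struct trb { unsigned int addr, len, ctrl; };

static struct trb ring[RING_SIZE];
static unsigned int enqueue;
static int started;

/* Fill in one TRB without touching the hardware (the "prepare" half). */
static void prepare_one_trb(unsigned int addr, unsigned int len,
                            unsigned int ctrl)
{
        ring[enqueue].addr = addr;
        ring[enqueue].len = len;
        ring[enqueue].ctrl = ctrl;
        enqueue = (enqueue + 1) % RING_SIZE;
}

/* Kick the controller once, after all TRBs for the transfer are queued. */
static int start_transfer(void)
{
        if (started)
                return 0;       /* already busy, nothing to do */
        started = 1;
        printf("START_TRANSFER issued for %u TRB(s)\n", enqueue);
        return 0;
}

int main(void)
{
        prepare_one_trb(0x1000, 8, 1);  /* e.g. a setup TRB */
        prepare_one_trb(0x2000, 64, 2); /* a chained data TRB */
        return start_transfer();
}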
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index efddaf5d11d1..204c754cc647 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -180,11 +180,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number == 0)
+ if (dwc->ep0_bounced && dep->number <= 1)
dwc->ep0_bounced = false;
- else
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
+
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
trace_dwc3_gadget_giveback(req);
@@ -1720,7 +1720,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
reg |= DWC3_DCFG_LOWSPEED;
break;
case USB_SPEED_FULL:
- reg |= DWC3_DCFG_FULLSPEED1;
+ reg |= DWC3_DCFG_FULLSPEED;
break;
case USB_SPEED_HIGH:
reg |= DWC3_DCFG_HIGHSPEED;
@@ -2232,9 +2232,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep = dwc->eps[epnum];
- if (!(dep->flags & DWC3_EP_ENABLED) &&
- !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
- return;
+ if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ return;
+
+ /* Handle only EPCMDCMPLT when EP disabled */
+ if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
+ return;
+ }
if (epnum == 0 || epnum == 1) {
dwc3_ep0_interrupt(dwc, event);
@@ -2531,8 +2536,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_HIGH;
break;
- case DWC3_DSTS_FULLSPEED2:
- case DWC3_DSTS_FULLSPEED1:
+ case DWC3_DSTS_FULLSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_FULL;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 41ab61f9b6e0..49d685ad0da9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1694,9 +1694,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
value = min(w_length, (u16) 1);
break;
- /* function drivers must handle get/set altsetting; if there's
- * no get() method, we know only altsetting zero works.
- */
+ /* function drivers must handle get/set altsetting */
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE)
goto unknown;
@@ -1705,7 +1703,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
f = cdev->config->interface[intf];
if (!f)
break;
- if (w_value && !f->set_alt)
+
+ /*
+ * If there's no get_alt() method, we know only altsetting zero
+ * works. There is no need to check if set_alt() is not NULL
+ * as we check this in usb_add_function().
+ */
+ if (w_value && !f->get_alt)
break;
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
@@ -2143,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
ret = -ENOMEM;
- kfree(cdev->os_desc_req);
+ usb_ep_free_request(ep0, cdev->os_desc_req);
goto end;
}
cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index aab3fc1dbb94..5490fc51638e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
__ffs_epfile_read_buffer_free(epfile);
++epfile;
}
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while(count--) {
struct usb_endpoint_descriptor *ds;
int desc_idx;
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
++ep;
++epfile;
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
return ret;
@@ -2091,8 +2091,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
case FFS_STRING:
/*
- * Strings are indexed from 1 (0 is magic ;) reserved
- * for languages list or some such)
+ * Strings are indexed from 1 (0 is reserved
+ * for languages list)
*/
if (*valuep > helper->ffs->strings_count)
helper->ffs->strings_count = *valuep;
@@ -2252,7 +2252,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- !d->Reserved1)
+ d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
/* cleanup after autoconfig */
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
if (ep->ep && ep->req)
usb_ep_free_request(ep->ep, ep->req);
ep->req = NULL;
++ep;
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
kfree(func->eps);
func->eps = NULL;
@@ -3666,6 +3666,7 @@ static void ffs_closed(struct ffs_data *ffs)
{
struct ffs_dev *ffs_obj;
struct f_fs_opts *opts;
+ struct config_item *ci;
ENTER();
ffs_dev_lock();
@@ -3689,8 +3690,11 @@ static void ffs_closed(struct ffs_data *ffs)
|| !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
goto done;
- unregister_gadget_item(ffs_obj->opts->
- func_inst.group.cg_item.ci_parent->ci_parent);
+ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+ ffs_dev_unlock();
+
+ unregister_gadget_item(ci);
+ return;
done:
ffs_dev_unlock();
}
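The f_fs loops above switch from do { ... } while (--count) to a pre-tested while (count--): with an unsigned counter and count == 0, the old form ran the body once and then wrapped the counter around, iterating effectively forever. A tiny demonstration of the safe form:

#include <stdio.h>

/*
 * Pre-test form used by the f_fs fix: the body never runs when count is 0.
 * The old do/while form decremented first, so count == 0 became UINT_MAX
 * and the loop kept going.
 */
static unsigned int walk(unsigned int count)
{
        unsigned int visited = 0;

        while (count--)
                visited++;
        return visited;
}

int main(void)
{
        printf("count=0 -> %u, count=3 -> %u\n", walk(0), walk(3));
        return 0;
}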
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 3151d2a0fe59..5f8139b8e601 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -593,7 +593,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
- ERROR(cdev, "Enable IN endpoint FAILED!\n");
+ ERROR(cdev, "Enable OUT endpoint FAILED!\n");
goto fail;
}
hidg->out_ep->driver_data = hidg;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index e8f4102d19df..6bde4396927c 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1126,7 +1126,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* data and/or status stage for control request */
} else if (dev->state == STATE_DEV_SETUP) {
- /* IN DATA+STATUS caller makes len <= wLength */
+ len = min_t(size_t, len, dev->setup_wLength);
if (dev->setup_in) {
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
@@ -1734,10 +1734,12 @@ static struct usb_gadget_driver gadgetfs_driver = {
* such as configuration notifications.
*/
-static int is_valid_config (struct usb_config_descriptor *config)
+static int is_valid_config(struct usb_config_descriptor *config,
+ unsigned int total)
{
return config->bDescriptorType == USB_DT_CONFIG
&& config->bLength == USB_DT_CONFIG_SIZE
+ && total >= USB_DT_CONFIG_SIZE
&& config->bConfigurationValue != 0
&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
@@ -1762,7 +1764,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
}
spin_unlock_irq(&dev->lock);
- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
+ (len > PAGE_SIZE * 4))
return -EINVAL;
/* we might need to change message format someday */
@@ -1786,7 +1789,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
/* full or low speed config */
dev->config = (void *) kbuf;
total = le16_to_cpu(dev->config->wTotalLength);
- if (!is_valid_config (dev->config) || total >= length)
+ if (!is_valid_config(dev->config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
@@ -1795,10 +1799,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
if (kbuf [1] == USB_DT_CONFIG) {
dev->hs_config = (void *) kbuf;
total = le16_to_cpu(dev->hs_config->wTotalLength);
- if (!is_valid_config (dev->hs_config) || total >= length)
+ if (!is_valid_config(dev->hs_config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
+ } else {
+ dev->hs_config = NULL;
}
/* could support multiple configs, using another encoding! */
@@ -1811,7 +1818,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
|| dev->dev->bDescriptorType != USB_DT_DEVICE
|| dev->dev->bNumConfigurations != 1)
goto fail;
- dev->dev->bNumConfigurations = 1;
dev->dev->bcdUSB = cpu_to_le16 (0x0200);
/* triggers gadgetfs_bind(); then we can enumerate. */
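The gadgetfs dev_config() fix validates wTotalLength against the number of bytes the caller actually supplied, not just the descriptor's self-description. A rough user-space sketch of that check (little-endian host assumed, descriptor layout deliberately simplified):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define USB_DT_CONFIG           0x02
#define USB_DT_CONFIG_SIZE      9

struct config_desc {
        uint8_t  bLength;
        uint8_t  bDescriptorType;
        uint16_t wTotalLength;  /* little-endian on the wire */
} __attribute__((packed));

/*
 * Accept a config descriptor only if its own fields are sane and its
 * declared total length fits inside the bytes actually available.
 */
static bool config_fits(const uint8_t *buf, size_t avail)
{
        struct config_desc d;

        if (avail < USB_DT_CONFIG_SIZE)
                return false;
        memcpy(&d, buf, sizeof(d));
        return d.bDescriptorType == USB_DT_CONFIG &&
               d.bLength == USB_DT_CONFIG_SIZE &&
               d.wTotalLength >= USB_DT_CONFIG_SIZE &&
               d.wTotalLength <= avail;
}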
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f3212db9bc37..12c7687216e6 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+ sprintf(ep->name, "ep%d", ep->index);
+ ep->ep.name = ep->name;
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d589dfa..b03b2ebfc53a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@ struct usba_ep {
void __iomem *ep_regs;
void __iomem *dma_regs;
void __iomem *fifo;
+ char name[8];
struct usb_ep ep;
struct usba_udc *udc;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9483489080f6..0402177f93cd 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1317,7 +1317,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
if (!ret)
break;
}
- if (!ret && !udc->driver)
+ if (ret)
+ ret = -ENODEV;
+ else if (udc->driver)
+ ret = -EBUSY;
+ else
goto found;
} else {
list_for_each_entry(udc, &udc_list, list) {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 02b14e91ae6c..c60abe3a68f9 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -330,7 +330,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
/* caller must hold lock */
static void stop_activity(struct dummy *dum)
{
- struct dummy_ep *ep;
+ int i;
/* prevent any more requests */
dum->address = 0;
@@ -338,8 +338,8 @@ static void stop_activity(struct dummy *dum)
/* The timer is left running so that outstanding URBs can fail */
/* nuke any pending requests first, so driver i/o is quiesced */
- list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
- nuke(dum, ep);
+ for (i = 0; i < DUMMY_ENDPOINTS; ++i)
+ nuke(dum, &dum->ep[i]);
/* driver now does any non-usb quiescing necessary */
}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index be9e63836881..414e3c376dbb 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -43,7 +43,6 @@ struct at91_usbh_data {
struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
u8 ports; /* number of ports on root hub */
u8 overcurrent_supported;
- u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
u8 overcurrent_status[AT91_MAX_USBH_PORTS];
u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
};
@@ -266,8 +265,7 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (!valid_port(port))
return;
- gpiod_set_value(pdata->vbus_pin[port],
- pdata->vbus_pin_active_low[port] ^ enable);
+ gpiod_set_value(pdata->vbus_pin[port], enable);
}
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -275,8 +273,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (!valid_port(port))
return -EINVAL;
- return gpiod_get_value(pdata->vbus_pin[port]) ^
- pdata->vbus_pin_active_low[port];
+ return gpiod_get_value(pdata->vbus_pin[port]);
}
/*
@@ -533,18 +530,17 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->ports = ports;
at91_for_each_port(i) {
- pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev,
- "atmel,vbus-gpio",
- GPIOD_IN);
+ if (i >= pdata->ports)
+ break;
+
+ pdata->vbus_pin[i] =
+ devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
+ i, GPIOD_OUT_HIGH);
if (IS_ERR(pdata->vbus_pin[i])) {
err = PTR_ERR(pdata->vbus_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
continue;
}
-
- pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
-
- ohci_at91_usb_set_power(pdata, i, 1);
}
at91_for_each_port(i) {
@@ -552,8 +548,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
break;
pdata->overcurrent_pin[i] =
- devm_gpiod_get_optional(&pdev->dev,
- "atmel,oc-gpio", GPIOD_IN);
+ devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
+ i, GPIOD_IN);
if (IS_ERR(pdata->overcurrent_pin[i])) {
err = PTR_ERR(pdata->overcurrent_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 321de2e0161b..8414ed2a02de 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -979,6 +979,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
xhci->devs[slot_id] = NULL;
}
+/*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free those children first. Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *vdev;
+ struct list_head *tt_list_head;
+ struct xhci_tt_bw_info *tt_info, *next;
+ int i;
+
+ vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
+
+ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* is this a hub device that added a tt_info to the tts list */
+ if (tt_info->slot_id == slot_id) {
+ /* are any devices using this tt_info? */
+ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ vdev = xhci->devs[i];
+ if (vdev && (vdev->tt_info == tt_info))
+ xhci_free_virt_devices_depth_first(
+ xhci, i);
+ }
+ }
+ }
+ /* we are now at a leaf device */
+ xhci_free_virt_device(xhci, slot_id);
+}
+
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
@@ -1795,7 +1829,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;
- del_timer_sync(&xhci->cmd_timer);
+ cancel_delayed_work_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1828,8 +1862,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
}
- for (i = 1; i < MAX_HC_SLOTS; ++i)
- xhci_free_virt_device(xhci, i);
+ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ xhci_free_virt_devices_depth_first(xhci, i);
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
@@ -2342,9 +2376,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_LIST_HEAD(&xhci->cmd_list);
- /* init command timeout timer */
- setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
- (unsigned long)xhci);
+ /* init command timeout work */
+ INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+ init_completion(&xhci->cmd_ring_stop_completion);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
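xhci_free_virt_devices_depth_first() frees any child device that still references a hub's tt_info before freeing the hub itself, and xhci_mem_cleanup() now walks slots from the highest down. A small sketch of that ordering with a made-up parent table (slot numbers and array contents are illustrative):

#include <stdio.h>

#define MAX_SLOTS 8

/* parent[i] < 0: no parent hub; -2 means the slot is already free. */
static int parent[MAX_SLOTS] = { -2, -1, 1, 1, -2, 3, -2, -2 };

static void free_slot(int slot)
{
        printf("freeing slot %d\n", slot);
        parent[slot] = -2;
}

/*
 * Free every child hanging off @slot before the slot itself, so a hub's
 * bookkeeping is still valid while its children are being torn down.
 */
static void free_depth_first(int slot)
{
        int i;

        if (parent[slot] == -2)
                return;
        for (i = 1; i < MAX_SLOTS; i++)
                if (parent[i] == slot)
                        free_depth_first(i);
        free_slot(slot);
}

int main(void)
{
        int i;

        for (i = MAX_SLOTS - 1; i > 0; i--)     /* mirror the reversed loop */
                free_depth_first(i);
        return 0;
}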
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 1094ebd2838f..bac961cd24ad 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -579,8 +579,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto disable_ldos;
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto disable_clk;
+ }
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e96ae80d107e..954abfd5014d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -165,7 +165,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ddfab301e366..e5834dd9bcde 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
return -ENODEV;
/* Try to set 64-bit DMA first */
- if (WARN_ON(!pdev->dev.dma_mask))
+ if (!pdev->dev.dma_mask)
/* Platform did not initialize dma_mask */
ret = dma_coerce_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(64));
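Dropping the WARN_ON() means a platform that never initialized dma_mask is now coerced quietly to 64-bit DMA; the 32-bit fallback sits a few lines further down in xhci_plat_probe() and is not part of this hunk. A rough sketch of the overall try-64-then-32 shape (illustrative, not a verbatim copy of the driver):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int ret;

	/* Coerce a missing mask, otherwise try 64-bit DMA first. */
	if (!pdev->dev.dma_mask)
		ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	else
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	/* Fall back to 32-bit DMA when the platform cannot do 64-bit. */
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;
	}

	return 0;
}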
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bdf6b13d9b67..e32029a31ca4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -279,23 +279,76 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
readl(&xhci->dba->doorbell[0]);
}
-static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+{
+ return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+}
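xhci_mod_cmd_timer() above is a thin wrapper around mod_delayed_work(), so the existing timer-style call sites survive the switch from struct timer_list to struct delayed_work. A minimal sketch of that conversion pattern in isolation (the demo_* names and structure are illustrative, not part of this patch):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_ctrl {
	spinlock_t lock;
	struct delayed_work cmd_timer;		/* was: struct timer_list */
};

/* The handler receives a work_struct; recover the container as the patch does. */
static void demo_cmd_timeout(struct work_struct *work)
{
	struct demo_ctrl *ctrl =
		container_of(to_delayed_work(work), struct demo_ctrl, cmd_timer);
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	/* handle the timeout; unlike a timer callback this runs in process context */
	spin_unlock_irqrestore(&ctrl->lock, flags);
}

static void demo_init(struct demo_ctrl *ctrl)
{
	spin_lock_init(&ctrl->lock);
	INIT_DELAYED_WORK(&ctrl->cmd_timer, demo_cmd_timeout);	/* was: setup_timer() */
}

static void demo_arm(struct demo_ctrl *ctrl, unsigned long delay)
{
	mod_delayed_work(system_wq, &ctrl->cmd_timer, delay);	/* was: mod_timer() */
}

static void demo_teardown(struct demo_ctrl *ctrl)
{
	cancel_delayed_work_sync(&ctrl->cmd_timer);	/* was: del_timer_sync() */
}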
+
+static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+{
+ return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
+ cmd_list);
+}
+
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ struct xhci_command *cur_cmd)
+{
+ struct xhci_command *i_cmd;
+ u32 cycle_state;
+
+ /* Turn all aborted commands in list to no-ops, then restart */
+ list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
+
+ if (i_cmd->status != COMP_CMD_ABORT)
+ continue;
+
+ i_cmd->status = COMP_CMD_STOP;
+
+ xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+ i_cmd->command_trb);
+ /* get cycle state from the original cmd trb */
+ cycle_state = le32_to_cpu(
+ i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+ /* modify the command trb to no-op command */
+ i_cmd->command_trb->generic.field[0] = 0;
+ i_cmd->command_trb->generic.field[1] = 0;
+ i_cmd->command_trb->generic.field[2] = 0;
+ i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+ /*
+ * caller waiting for completion is called when command
+ * completion event is received for these no-op commands
+ */
+ }
+
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ /* ring command ring doorbell to restart the command ring */
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_ring_cmd_db(xhci);
+ }
+}
+
+/* Must be called with xhci->lock held, releases and re-acquires the lock */
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
u64 temp_64;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
- temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+ reinit_completion(&xhci->cmd_ring_stop_completion);
- /*
- * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
- * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
- * but the completion event is never sent. Use the cmd timeout timer to
- * handle those cases. Use twice the time to cover the bit polling retry
- */
- mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
@@ -315,17 +368,30 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
udelay(1000);
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
- if (ret == 0)
- return 0;
-
- xhci_err(xhci, "Stopped the command ring failed, "
- "maybe the host is dead\n");
- del_timer(&xhci->cmd_timer);
- xhci->xhc_state |= XHCI_STATE_DYING;
- xhci_halt(xhci);
- return -ESHUTDOWN;
+ if (ret < 0) {
+ xhci_err(xhci, "Stopped the command ring failed, "
+ "maybe the host is dead\n");
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ xhci_halt(xhci);
+ return -ESHUTDOWN;
+ }
+ }
+ /*
+ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+ * but the completion event is never sent. Wait 2 secs (arbitrary
+ * number) to handle those cases after negation of CMD_RING_RUNNING.
+ */
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
+ msecs_to_jiffies(2000));
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!ret) {
+ xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
+ xhci_cleanup_command_queue(xhci);
+ } else {
+ xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
}
-
return 0;
}
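The rewritten abort path no longer re-arms a timer; the event handler signals cmd_ring_stop_completion (complete_all() in the handle_cmd_completion hunk further down) and the abort path sleeps on it with xhci->lock dropped. A stripped-down sketch of that drop-lock/wait/re-lock pattern, using illustrative demo_* names rather than the xhci structures:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct demo_host {
	spinlock_t lock;
	struct completion stop_done;
};

static void demo_init(struct demo_host *demo)
{
	spin_lock_init(&demo->lock);
	init_completion(&demo->stop_done);
}

/* Interrupt path: wake every waiter once the stop/abort event arrives. */
static void demo_stop_event(struct demo_host *demo)
{
	complete_all(&demo->stop_done);
}

/* Abort path: arm the completion, poke the hardware, wait with the lock dropped. */
static int demo_wait_for_stop(struct demo_host *demo)
{
	unsigned long flags, timeleft;

	spin_lock_irqsave(&demo->lock, flags);
	reinit_completion(&demo->stop_done);
	/* ... write the abort/stop bit to the controller here ... */
	spin_unlock_irqrestore(&demo->lock, flags);

	timeleft = wait_for_completion_timeout(&demo->stop_done,
					       msecs_to_jiffies(2000));

	spin_lock_irqsave(&demo->lock, flags);
	/* ... on timeout, clean up the command queue as the xhci code does ... */
	spin_unlock_irqrestore(&demo->lock, flags);

	return timeleft ? 0 : -ETIMEDOUT;
}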
@@ -847,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
- if (xhci->xhc_state & XHCI_STATE_REMOVING) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but no command pending, "
@@ -1207,101 +1262,62 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}
-/*
- * Turn all commands on command ring with status set to "aborted" to no-op trbs.
- * If there are other commands waiting then restart the ring and kick the timer.
- * This must be called with command ring stopped and xhci->lock held.
- */
-static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
- struct xhci_command *cur_cmd)
-{
- struct xhci_command *i_cmd, *tmp_cmd;
- u32 cycle_state;
-
- /* Turn all aborted commands in list to no-ops, then restart */
- list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
- cmd_list) {
-
- if (i_cmd->status != COMP_CMD_ABORT)
- continue;
-
- i_cmd->status = COMP_CMD_STOP;
-
- xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
- i_cmd->command_trb);
- /* get cycle state from the original cmd trb */
- cycle_state = le32_to_cpu(
- i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
- /* modify the command trb to no-op command */
- i_cmd->command_trb->generic.field[0] = 0;
- i_cmd->command_trb->generic.field[1] = 0;
- i_cmd->command_trb->generic.field[2] = 0;
- i_cmd->command_trb->generic.field[3] = cpu_to_le32(
- TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
-
- /*
- * caller waiting for completion is called when command
- * completion event is received for these no-op commands
- */
- }
-
- xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
-
- /* ring command ring doorbell to restart the command ring */
- if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
- !(xhci->xhc_state & XHCI_STATE_DYING)) {
- xhci->current_cmd = cur_cmd;
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
- xhci_ring_cmd_db(xhci);
- }
- return;
-}
-
-
-void xhci_handle_command_timeout(unsigned long data)
+void xhci_handle_command_timeout(struct work_struct *work)
{
struct xhci_hcd *xhci;
int ret;
unsigned long flags;
u64 hw_ring_state;
- bool second_timeout = false;
- xhci = (struct xhci_hcd *) data;
- /* mark this command to be cancelled */
+ xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
+
spin_lock_irqsave(&xhci->lock, flags);
- if (xhci->current_cmd) {
- if (xhci->current_cmd->status == COMP_CMD_ABORT)
- second_timeout = true;
- xhci->current_cmd->status = COMP_CMD_ABORT;
+
+ /*
+ * If timeout work is pending, or current_cmd is NULL, it means we
+ * raced with command completion. Command is handled so just return.
+ */
+ if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
}
+ /* mark this command to be cancelled */
+ xhci->current_cmd->status = COMP_CMD_ABORT;
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) {
- spin_unlock_irqrestore(&xhci->lock, flags);
+ /* Prevent new doorbell, and start command abort */
+ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_dbg(xhci, "Command timeout\n");
- ret = xhci_abort_cmd_ring(xhci);
+ ret = xhci_abort_cmd_ring(xhci, flags);
if (unlikely(ret == -ESHUTDOWN)) {
xhci_err(xhci, "Abort command ring failed\n");
xhci_cleanup_command_queue(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
+
+ return;
}
- return;
+
+ goto time_out_completed;
}
- /* command ring failed to restart, or host removed. Bail out */
- if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+ /* host removed. Bail out */
+ if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+ xhci_dbg(xhci, "host removed, ring start fail?\n");
xhci_cleanup_command_queue(xhci);
- return;
+
+ goto time_out_completed;
}
/* command timeout on stopped ring, ring can't be aborted */
xhci_dbg(xhci, "Command timeout on stopped ring\n");
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+
+time_out_completed:
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
@@ -1333,7 +1349,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- del_timer(&xhci->cmd_timer);
+ cancel_delayed_work(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1341,7 +1357,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
/* If CMD ring stopped we own the trbs between enqueue and dequeue */
if (cmd_comp_code == COMP_CMD_STOP) {
- xhci_handle_stopped_cmd_ring(xhci, cmd);
+ complete_all(&xhci->cmd_ring_stop_completion);
return;
}
@@ -1359,8 +1375,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
*/
if (cmd_comp_code == COMP_CMD_ABORT) {
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- if (cmd->status == COMP_CMD_ABORT)
+ if (cmd->status == COMP_CMD_ABORT) {
+ if (xhci->current_cmd == cmd)
+ xhci->current_cmd = NULL;
goto event_handled;
+ }
}
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1421,7 +1440,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
if (cmd->cmd_list.next != &xhci->cmd_list) {
xhci->current_cmd = list_entry(cmd->cmd_list.next,
struct xhci_command, cmd_list);
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ } else if (xhci->current_cmd == cmd) {
+ xhci->current_cmd = NULL;
}
event_handled:
@@ -1939,8 +1960,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
u32 remaining, requested;
- bool on_data_stage;
+ u32 trb_type;
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -1950,14 +1972,11 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
requested = td->urb->transfer_buffer_length;
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
- /* not setup (dequeue), or status stage means we are at data stage */
- on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
-
switch (trb_comp_code) {
case COMP_SUCCESS:
- if (ep_trb != td->last_trb) {
+ if (trb_type != TRB_STATUS) {
xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
- on_data_stage ? "data" : "setup");
+ (trb_type == TRB_DATA) ? "data" : "setup");
*status = -ESHUTDOWN;
break;
}
@@ -1967,15 +1986,25 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
*status = 0;
break;
case COMP_STOP_SHORT:
- if (on_data_stage)
+ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
td->urb->actual_length = remaining;
else
xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
goto finish_td;
case COMP_STOP:
- if (on_data_stage)
+ switch (trb_type) {
+ case TRB_SETUP:
+ td->urb->actual_length = 0;
+ goto finish_td;
+ case TRB_DATA:
+ case TRB_NORMAL:
td->urb->actual_length = requested - remaining;
- goto finish_td;
+ goto finish_td;
+ default:
+ xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
+ trb_type);
+ goto finish_td;
+ }
case COMP_STOP_INVAL:
goto finish_td;
default:
@@ -1987,7 +2016,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
- if (on_data_stage)
+ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
td->urb->actual_length = requested - remaining;
else if (!td->urb_length_set)
td->urb->actual_length = 0;
@@ -1995,14 +2024,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
/* stopped at setup stage, no data transferred */
- if (ep_trb == ep_ring->dequeue)
+ if (trb_type == TRB_SETUP)
goto finish_td;
/*
* if on data stage then update the actual_length of the URB and flag it
* as set, so it won't be overwritten in the event for the last TRB.
*/
- if (on_data_stage) {
+ if (trb_type == TRB_DATA ||
+ trb_type == TRB_NORMAL) {
td->urb_length_set = true;
td->urb->actual_length = requested - remaining;
xhci_dbg(xhci, "Waiting for status stage event\n");
@@ -3790,9 +3820,9 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
/* if there are no other commands queued we start the timeout timer */
if (xhci->cmd_list.next == &cmd->cmd_list &&
- !timer_pending(&xhci->cmd_timer)) {
+ !delayed_work_pending(&xhci->cmd_timer)) {
xhci->current_cmd = cmd;
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
}
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1cd56417cbec..9a0ec116654a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(urb_priv);
return ret;
}
- if ((xhci->xhc_state & XHCI_STATE_DYING) ||
- (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.",
- urb->ep->desc.bEndpointAddress, urb);
- /* Let the stop endpoint command watchdog timer (which set this
- * state) finish cleaning up the endpoint TD lists. We must
- * have caught it in the middle of dropping a lock and giving
- * back an URB.
- */
- goto done;
- }
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
@@ -3787,8 +3774,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying, removing or halted */
+ if (xhci->xhc_state) { /* dying, removing or halted */
+ ret = -ESHUTDOWN;
goto out;
+ }
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8ccc11a974b8..2d7b6374b58d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1568,7 +1568,8 @@ struct xhci_hcd {
#define CMD_RING_STATE_STOPPED (1 << 2)
struct list_head cmd_list;
unsigned int cmd_ring_reserved_trbs;
- struct timer_list cmd_timer;
+ struct delayed_work cmd_timer;
+ struct completion cmd_ring_stop_completion;
struct xhci_command *current_cmd;
struct xhci_ring *event_ring;
struct xhci_erst erst;
@@ -1934,7 +1935,7 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
-void xhci_handle_command_timeout(unsigned long data);
+void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 310238c6b5cd..896798071817 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -469,6 +469,7 @@ static const struct musb_platform_ops bfin_ops = {
.init = bfin_musb_init,
.exit = bfin_musb_exit,
+ .fifo_offset = bfin_fifo_offset,
.readb = bfin_readb,
.writeb = bfin_writeb,
.readw = bfin_readw,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 9e226468a13e..fca288bbc800 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2050,6 +2050,7 @@ struct musb_pending_work {
struct list_head node;
};
+#ifdef CONFIG_PM
/*
* Called from musb_runtime_resume(), musb_resume(), and
* musb_queue_resume_work(). Callers must take musb->lock.
@@ -2077,6 +2078,7 @@ static int musb_run_resume_work(struct musb *musb)
return error;
}
+#endif
/*
* Called to run work if device is active or else queue the work to happen
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index a611e2f67bdc..ade902ea1221 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -216,6 +216,7 @@ struct musb_platform_ops {
void (*pre_root_reset_end)(struct musb *musb);
void (*post_root_reset_end)(struct musb *musb);
int (*phy_callback)(enum musb_vbus_id_status status);
+ void (*clear_ep_rxintr)(struct musb *musb, int epnum);
};
/*
@@ -626,6 +627,12 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
musb->ops->post_root_reset_end(musb);
}
+static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+ if (musb->ops->clear_ep_rxintr)
+ musb->ops->clear_ep_rxintr(musb, epnum);
+}
+
/*
* gets the "dr_mode" property from DT and converts it into musb_mode
* if the property is not found or not recognized returns MUSB_OTG
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 4fef50e5c8c1..dd70c88419d2 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
unsigned i;
seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+ pm_runtime_get_sync(musb->controller);
for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
switch (musb_regmap[i].size) {
@@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
}
}
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
}
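Each debugfs handler touched in this file now brackets its register access with a runtime-PM get/put pair so the MUSB controller is guaranteed to be powered while its registers are read or written. A minimal sketch of that bracket around a single register read (the demo_* helper and its arguments are illustrative):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

/* Keep the device resumed for the duration of a single register access. */
static u8 demo_read_reg(struct device *dev, void __iomem *base, unsigned int offset)
{
	u8 val;

	pm_runtime_get_sync(dev);		/* resume the device if suspended */

	val = readb(base + offset);

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference; suspend later */

	return val;
}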
@@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
struct musb *musb = s->private;
unsigned test;
+ pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
if (test & MUSB_TEST_FORCE_HOST)
seq_printf(s, "force host\n");
@@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file,
u8 test;
char buf[18];
+ pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
dev_err(musb->controller, "Error: test mode is already set. "
"Please do USB Bus Reset to start a new test.\n");
- return count;
+ goto ret;
}
memset(buf, 0x00, sizeof(buf));
@@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file,
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+ret:
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return count;
}
@@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
case OTG_STATE_A_WAIT_BCON:
+ pm_runtime_get_sync(musb->controller);
+
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
break;
default:
connect = -1;
@@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
+ pm_runtime_get_sync(musb->controller);
if (!strncmp(buf, "0", 1)) {
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
@@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file,
}
}
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return count;
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index feae1561b9ab..9f125e179acd 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -267,6 +267,17 @@ static void otg_timer(unsigned long _musb)
pm_runtime_put_autosuspend(dev);
}
+void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+ u32 epintr;
+ struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+ const struct dsps_musb_wrapper *wrp = glue->wrp;
+
+ /* musb->lock might already be held */
+ epintr = (1 << epnum) << wrp->rxep_shift;
+ musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
+}
+
static irqreturn_t dsps_interrupt(int irq, void *hci)
{
struct musb *musb = hci;
@@ -622,6 +633,7 @@ static struct musb_platform_ops dsps_ops = {
.set_mode = dsps_musb_set_mode,
.recover = dsps_musb_recover,
+ .clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
};
static u64 musb_dmamask = DMA_BIT_MASK(32);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index f6cdbad00dac..ac3a4952abb4 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2374,12 +2374,11 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
int is_in = usb_pipein(urb->pipe);
int status = 0;
u16 csr;
+ struct dma_channel *dma = NULL;
musb_ep_select(regs, hw_end);
if (is_dma_capable()) {
- struct dma_channel *dma;
-
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
@@ -2395,10 +2394,9 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
- /* REVISIT we still get an irq; should likely clear the
- * endpoint's irq status here to avoid bogus irqs.
- * clearing that status is platform-specific...
- */
+ /* clear the endpoint's irq status here to avoid bogus irqs */
+ if (is_dma_capable() && dma)
+ musb_platform_clear_ep_rxintr(musb, ep->epnum);
} else if (ep->epnum) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(epio, MUSB_TXCSR);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f7b13fd25257..a3dcbd55e436 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -157,5 +157,5 @@ struct musb_dma_controller {
void __iomem *base;
u8 channel_count;
u8 used_channels;
- u8 irq;
+ int irq;
};
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2597b83a8ae2..95aa5233726c 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -95,6 +95,7 @@ struct ch341_private {
unsigned baud_rate; /* set baud rate */
u8 line_control; /* set line control value RTS/DTR */
u8 line_status; /* active status of modem control inputs */
+ u8 lcr;
};
static void ch341_set_termios(struct tty_struct *tty,
@@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
+ if (r < 0)
+ dev_err(&dev->dev, "failed to send control message: %d\n", r);
return r;
}
@@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- return r;
+ if (r < bufsize) {
+ if (r >= 0) {
+ dev_err(&dev->dev,
+ "short control message received (%d < %u)\n",
+ r, bufsize);
+ r = -EIO;
+ }
+
+ dev_err(&dev->dev, "failed to receive control message: %d\n",
+ r);
+ return r;
+ }
+
+ return 0;
}
-static int ch341_init_set_baudrate(struct usb_device *dev,
- struct ch341_private *priv, unsigned ctrl)
+static int ch341_set_baudrate_lcr(struct usb_device *dev,
+ struct ch341_private *priv, u8 lcr)
{
short a;
int r;
@@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev,
factor = 0x10000 - factor;
a = (factor & 0xff00) | divisor;
- /* 0x9c is "enable SFR_UART Control register and timer" */
- r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
- 0x9c | (ctrl << 8), a | 0x80);
+ /*
+ * CH341A buffers data until a full endpoint-size packet (32 bytes)
+ * has been received unless bit 7 is set.
+ */
+ a |= BIT(7);
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ if (r)
+ return r;
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
+ if (r)
+ return r;
return r;
}
@@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
unsigned long flags;
buffer = kmalloc(size, GFP_KERNEL);
@@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- /* setup the private status if available */
- if (r == 2) {
- r = 0;
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
- spin_unlock_irqrestore(&priv->lock, flags);
- } else
- r = -EPROTO;
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+ spin_unlock_irqrestore(&priv->lock, flags);
out: kfree(buffer);
return r;
@@ -200,9 +221,9 @@ out: kfree(buffer);
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
@@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- r = ch341_init_set_baudrate(dev, priv, 0);
+ r = ch341_set_baudrate_lcr(dev, priv, priv->lcr);
if (r < 0)
goto out;
@@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(port->serial->dev, priv);
if (r < 0)
@@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
r = ch341_configure(serial->dev, priv);
if (r)
- goto out;
+ return r;
if (tty)
ch341_set_termios(tty, port, NULL);
@@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r) {
dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
__func__, r);
- goto out;
+ return r;
}
r = usb_serial_generic_open(tty, port);
+ if (r)
+ goto err_kill_interrupt_urb;
+
+ return 0;
+
+err_kill_interrupt_urb:
+ usb_kill_urb(port->interrupt_in_urb);
-out: return r;
+ return r;
}
/* Old_termios contains the original termios settings and
@@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty,
baud_rate = tty_get_baud_rate(tty);
- priv->baud_rate = baud_rate;
ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
switch (C_CSIZE(tty)) {
@@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty,
ctrl |= CH341_LCR_STOP_BITS_2;
if (baud_rate) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
- r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+ priv->baud_rate = baud_rate;
+
+ r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl);
if (r < 0 && old_termios) {
priv->baud_rate = tty_termios_baud_rate(old_termios);
tty_termios_copy_hw(&tty->termios, old_termios);
+ } else if (r == 0) {
+ priv->lcr = ctrl;
}
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
}
- ch341_set_handshake(port->serial->dev, priv->line_control);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (C_BAUD(tty) == B0)
+ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
}
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
- struct ch341_private *priv;
-
- priv = usb_get_serial_port_data(serial->port[0]);
+ struct usb_serial_port *port = serial->port[0];
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ int ret;
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
- return 0;
+ if (tty_port_initialized(&port->port)) {
+ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver ch341_device = {
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fff718352e0c..5d61d0871f2e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1329,17 +1329,20 @@ static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
return 0;
}
-static int cp210x_gpio_set_single_ended(struct gpio_chip *gc, unsigned int gpio,
- enum single_ended_mode mode)
+static int cp210x_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
+ unsigned long config)
{
struct usb_serial *serial = gpiochip_get_data(gc);
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ enum pin_config_param param = pinconf_to_config_param(config);
/* Succeed only if in correct mode (this can't be set at runtime) */
- if ((mode == LINE_MODE_PUSH_PULL) && (priv->gpio_mode & BIT(gpio)))
+ if ((param == PIN_CONFIG_DRIVE_PUSH_PULL) &&
+ (priv->gpio_mode & BIT(gpio)))
return 0;
- if ((mode == LINE_MODE_OPEN_DRAIN) && !(priv->gpio_mode & BIT(gpio)))
+ if ((param == PIN_CONFIG_DRIVE_OPEN_DRAIN) &&
+ !(priv->gpio_mode & BIT(gpio)))
return 0;
return -ENOTSUPP;
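The callback moves from the old set_single_ended() hook to the generic set_config() hook, which hands the chip a packed pinconf word instead of an enum. A small sketch of how such a word is built and decoded, assuming a hypothetical demo_* driver rather than cp210x:

#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinconf-generic.h>

/* Chip side: unpack the config word and accept only what the hardware can do. */
static int demo_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
				unsigned long config)
{
	enum pin_config_param param = pinconf_to_config_param(config);

	switch (param) {
	case PIN_CONFIG_DRIVE_PUSH_PULL:
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		/* ... compare against the line's fixed mode, as cp210x does ... */
		return 0;
	default:
		return -ENOTSUPP;
	}
}

/* Caller side: a push-pull request arrives as a packed pinconf word. */
static unsigned long demo_push_pull_config(void)
{
	return pinconf_to_config_packed(PIN_CONFIG_DRIVE_PUSH_PULL, 0);
}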
@@ -1402,7 +1405,7 @@ static int cp2105_shared_gpio_init(struct usb_serial *serial)
priv->gc.direction_output = cp210x_gpio_direction_output;
priv->gc.get = cp210x_gpio_get;
priv->gc.set = cp210x_gpio_set;
- priv->gc.set_single_ended = cp210x_gpio_set_single_ended;
+ priv->gc.set_config = cp210x_gpio_set_config;
priv->gc.owner = THIS_MODULE;
priv->gc.parent = &serial->interface->dev;
priv->gc.base = -1;
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 5f17a3b9916d..80260b08398b 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -50,6 +50,7 @@
#define CYBERJACK_PRODUCT_ID 0x0100
/* Function prototypes */
+static int cyberjack_attach(struct usb_serial *serial);
static int cyberjack_port_probe(struct usb_serial_port *port);
static int cyberjack_port_remove(struct usb_serial_port *port);
static int cyberjack_open(struct tty_struct *tty,
@@ -77,6 +78,7 @@ static struct usb_serial_driver cyberjack_device = {
.description = "Reiner SCT Cyberjack USB card reader",
.id_table = id_table,
.num_ports = 1,
+ .attach = cyberjack_attach,
.port_probe = cyberjack_port_probe,
.port_remove = cyberjack_port_remove,
.open = cyberjack_open,
@@ -100,6 +102,14 @@ struct cyberjack_private {
short wrsent; /* Data already sent */
};
+static int cyberjack_attach(struct usb_serial *serial)
+{
+ if (serial->num_bulk_out < serial->num_ports)
+ return -ENODEV;
+
+ return 0;
+}
+
static int cyberjack_port_probe(struct usb_serial_port *port)
{
struct cyberjack_private *priv;
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 8282a6a18fee..22f23a429a95 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1237,6 +1237,7 @@ static int f81534_attach(struct usb_serial *serial)
static int f81534_port_probe(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv;
+ int ret;
port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
if (!port_priv)
@@ -1246,10 +1247,11 @@ static int f81534_port_probe(struct usb_serial_port *port)
mutex_init(&port_priv->mcr_mutex);
/* Assign logic-to-phy mapping */
- port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port);
- if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT)
- return -ENODEV;
+ ret = f81534_logic_to_phy_port(port->serial, port);
+ if (ret < 0)
+ return ret;
+ port_priv->phy_num = ret;
usb_set_serial_port_data(port, port_priv);
dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
port->port_number, port_priv->phy_num);
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 97cabf803c2f..b2f2e87aed94 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1043,6 +1043,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
+ kfree(buffer);
}
/* we are done with this urb, so let the host driver
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index dcc0c58aaad5..d50e5773483f 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2751,6 +2751,11 @@ static int edge_startup(struct usb_serial *serial)
EDGE_COMPATIBILITY_MASK1,
EDGE_COMPATIBILITY_MASK2 };
+ if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
dev = serial->dev;
/* create our private serial structure */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c339163698eb..9a0db2965fbb 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1499,8 +1499,7 @@ static int do_boot_mode(struct edgeport_serial *serial,
dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
- /* return an error on purpose */
- return -ENODEV;
+ return 1;
}
stayinbootmode:
@@ -1508,7 +1507,7 @@ stayinbootmode:
dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
serial->product_info.TiMode = TI_MODE_BOOT;
- return 0;
+ return 1;
}
static int ti_do_config(struct edgeport_port *port, int feature, int on)
@@ -2546,6 +2545,13 @@ static int edge_startup(struct usb_serial *serial)
int status;
u16 product_id;
+ /* Make sure we have the required endpoints when in download mode. */
+ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports)
+ return -ENODEV;
+ }
+
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (!edge_serial)
@@ -2553,14 +2559,18 @@ static int edge_startup(struct usb_serial *serial)
mutex_init(&edge_serial->es_lock);
edge_serial->serial = serial;
+ INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
usb_set_serial_data(serial, edge_serial);
status = download_fw(edge_serial);
- if (status) {
+ if (status < 0) {
kfree(edge_serial);
return status;
}
+ if (status > 0)
+ return 1; /* bind but do not register any ports */
+
product_id = le16_to_cpu(
edge_serial->serial->dev->descriptor.idProduct);
@@ -2572,7 +2582,6 @@ static int edge_startup(struct usb_serial *serial)
}
}
- INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
edge_heartbeat_schedule(edge_serial);
return 0;
@@ -2580,6 +2589,9 @@ static int edge_startup(struct usb_serial *serial)
static void edge_disconnect(struct usb_serial *serial)
{
+ struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+ cancel_delayed_work_sync(&edge_serial->heartbeat_work);
}
static void edge_release(struct usb_serial *serial)
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 344b4eea4bd5..d57fb5199218 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -68,6 +68,16 @@ struct iuu_private {
u32 clk;
};
+static int iuu_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
+ return -ENODEV;
+
+ return 0;
+}
+
static int iuu_port_probe(struct usb_serial_port *port)
{
struct iuu_private *priv;
@@ -1196,6 +1206,7 @@ static struct usb_serial_driver iuu_device = {
.tiocmset = iuu_tiocmset,
.set_termios = iuu_set_termios,
.init_termios = iuu_init_termios,
+ .attach = iuu_attach,
.port_probe = iuu_port_probe,
.port_remove = iuu_port_remove,
};
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index e49ad0c63ad8..83523fcf6fb9 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -699,6 +699,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
#endif
+static int keyspan_pda_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int keyspan_pda_port_probe(struct usb_serial_port *port)
{
@@ -776,6 +789,7 @@ static struct usb_serial_driver keyspan_pda_device = {
.break_ctl = keyspan_pda_break_ctl,
.tiocmget = keyspan_pda_tiocmget,
.tiocmset = keyspan_pda_tiocmset,
+ .attach = keyspan_pda_attach,
.port_probe = keyspan_pda_port_probe,
.port_remove = keyspan_pda_port_remove,
};
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 0ee190fc1bf8..6cb45757818f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
status_buf, KLSI_STATUSBUF_LEN,
10000
);
- if (rc < 0)
- dev_err(&port->dev, "Reading line status failed (error = %d)\n",
- rc);
- else {
+ if (rc != KLSI_STATUSBUF_LEN) {
+ dev_err(&port->dev, "reading line status failed: %d\n", rc);
+ if (rc >= 0)
+ rc = -EIO;
+ } else {
status = get_unaligned_le16(status_buf);
dev_info(&port->serial->dev->dev, "read status %x %x\n",
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 2363654cafc9..813035f51fe7 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -51,6 +51,7 @@
/* Function prototypes */
+static int kobil_attach(struct usb_serial *serial);
static int kobil_port_probe(struct usb_serial_port *probe);
static int kobil_port_remove(struct usb_serial_port *probe);
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
@@ -86,6 +87,7 @@ static struct usb_serial_driver kobil_device = {
.description = "KOBIL USB smart card terminal",
.id_table = id_table,
.num_ports = 1,
+ .attach = kobil_attach,
.port_probe = kobil_port_probe,
.port_remove = kobil_port_remove,
.ioctl = kobil_ioctl,
@@ -113,6 +115,16 @@ struct kobil_private {
};
+static int kobil_attach(struct usb_serial *serial)
+{
+ if (serial->num_interrupt_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int kobil_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index d52caa03679c..91bc170b408a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -65,8 +65,6 @@ struct moschip_port {
struct urb *write_urb_pool[NUM_URBS];
};
-static struct usb_serial_driver moschip7720_2port_driver;
-
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
@@ -970,25 +968,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
tty_port_tty_wakeup(&mos7720_port->port->port);
}
-/*
- * mos77xx_probe
- * this function installs the appropriate read interrupt endpoint callback
- * depending on whether the device is a 7720 or 7715, thus avoiding costly
- * run-time checks in the high-frequency callback routine itself.
- */
-static int mos77xx_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
- if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
- moschip7720_2port_driver.read_int_callback =
- mos7715_interrupt_callback;
- else
- moschip7720_2port_driver.read_int_callback =
- mos7720_interrupt_callback;
-
- return 0;
-}
-
static int mos77xx_calc_num_ports(struct usb_serial *serial)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
@@ -1917,6 +1896,11 @@ static int mos7720_startup(struct usb_serial *serial)
u16 product;
int ret_val;
+ if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing bulk endpoints\n");
+ return -ENODEV;
+ }
+
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
@@ -1941,19 +1925,18 @@ static int mos7720_startup(struct usb_serial *serial)
tmp->interrupt_in_endpointAddress;
serial->port[1]->interrupt_in_urb = NULL;
serial->port[1]->interrupt_in_buffer = NULL;
+
+ if (serial->port[0]->interrupt_in_urb) {
+ struct urb *urb = serial->port[0]->interrupt_in_urb;
+
+ urb->complete = mos7715_interrupt_callback;
+ }
}
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
- /* start the interrupt urb */
- ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
- if (ret_val)
- dev_err(&dev->dev,
- "%s - Error %d submitting control urb\n",
- __func__, ret_val);
-
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
if (product == MOSCHIP_DEVICE_ID_7715) {
ret_val = mos7715_parport_init(serial);
@@ -1961,6 +1944,13 @@ static int mos7720_startup(struct usb_serial *serial)
return ret_val;
}
#endif
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+ if (ret_val) {
+ dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
+ ret_val);
+ }
+
/* LSR For Port 1 */
read_mos_reg(serial, 0, MOS7720_LSR, &data);
dev_dbg(&dev->dev, "LSR:%x\n", data);
@@ -1970,6 +1960,8 @@ static int mos7720_startup(struct usb_serial *serial)
static void mos7720_release(struct usb_serial *serial)
{
+ usb_kill_urb(serial->port[0]->interrupt_in_urb);
+
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
/* close the parallel port */
@@ -2019,11 +2011,6 @@ static int mos7720_port_probe(struct usb_serial_port *port)
if (!mos7720_port)
return -ENOMEM;
- /* Initialize all port interrupt end point to port 0 int endpoint.
- * Our device has only one interrupt endpoint common to all ports.
- */
- port->interrupt_in_endpointAddress =
- port->serial->port[0]->interrupt_in_endpointAddress;
mos7720_port->port = port;
usb_set_serial_port_data(port, mos7720_port);
@@ -2053,7 +2040,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
- .probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
.port_probe = mos7720_port_probe,
@@ -2067,7 +2053,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
- .read_int_callback = NULL /* dynamically assigned in probe() */
+ .read_int_callback = mos7720_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 9a220b8e810f..ea27fb23967a 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -214,7 +214,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
struct moschip_port {
int port_num; /*Actual port number in the device(1,2,etc) */
- struct urb *write_urb; /* write URB for this port */
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
@@ -1037,9 +1036,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
serial,
serial->port[0]->interrupt_in_urb->interval);
- /* start interrupt read for mos7840 *
- * will continue as long as mos7840 is connected */
-
+ /* start interrupt read for mos7840 */
response =
usb_submit_urb(serial->port[0]->interrupt_in_urb,
GFP_KERNEL);
@@ -1186,7 +1183,6 @@ static void mos7840_close(struct usb_serial_port *port)
}
}
- usb_kill_urb(mos7840_port->write_urb);
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
@@ -1199,12 +1195,6 @@ static void mos7840_close(struct usb_serial_port *port)
}
}
- if (mos7840_port->write_urb) {
- /* if this urb had a transfer buffer already (old tx) free it */
- kfree(mos7840_port->write_urb->transfer_buffer);
- usb_free_urb(mos7840_port->write_urb);
- }
-
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
@@ -2113,6 +2103,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
return mos7840_num_ports;
}
+static int mos7840_attach(struct usb_serial *serial)
+{
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
@@ -2388,6 +2389,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
.tiocmset = mos7840_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
+ .attach = mos7840_attach,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index f6c6900bccf0..a180b17d2432 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -38,6 +38,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int omninet_write_room(struct tty_struct *tty);
static void omninet_disconnect(struct usb_serial *serial);
+static int omninet_attach(struct usb_serial *serial);
static int omninet_port_probe(struct usb_serial_port *port);
static int omninet_port_remove(struct usb_serial_port *port);
@@ -56,6 +57,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
.num_ports = 1,
+ .attach = omninet_attach,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
.open = omninet_open,
@@ -104,6 +106,17 @@ struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
+static int omninet_attach(struct usb_serial *serial)
+{
+ /* The second bulk-out endpoint is used for writing. */
+ if (serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int omninet_port_probe(struct usb_serial_port *port)
{
struct omninet_data *od;
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a4b88bc038b6..b8bf52bf7a94 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -134,6 +134,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
+static int oti6858_attach(struct usb_serial *serial);
static int oti6858_port_probe(struct usb_serial_port *port);
static int oti6858_port_remove(struct usb_serial_port *port);
@@ -158,6 +159,7 @@ static struct usb_serial_driver oti6858_device = {
.write_bulk_callback = oti6858_write_bulk_callback,
.write_room = oti6858_write_room,
.chars_in_buffer = oti6858_chars_in_buffer,
+ .attach = oti6858_attach,
.port_probe = oti6858_port_probe,
.port_remove = oti6858_port_remove,
};
@@ -324,6 +326,20 @@ static void send_data(struct work_struct *work)
usb_serial_port_softint(port);
}
+static int oti6858_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int oti6858_port_probe(struct usb_serial_port *port)
{
struct oti6858_private *priv;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ae682e4eeaef..46fca6b75846 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -220,9 +220,17 @@ static int pl2303_probe(struct usb_serial *serial,
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
+ unsigned char num_ports = serial->num_ports;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
return -ENOMEM;
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 659cb8606bd9..5709cc93b083 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct qt2_port_private *port_priv;
- unsigned long flags;
int i;
serial = port->serial;
port_priv = usb_get_serial_port_data(port);
- spin_lock_irqsave(&port_priv->urb_lock, flags);
usb_kill_urb(port_priv->write_urb);
- port_priv->urb_in_use = false;
- spin_unlock_irqrestore(&port_priv->urb_lock, flags);
/* flush the port transmit buffer */
i = usb_control_msg(serial->dev,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index ef0dbf0703c5..475e6c31b266 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -154,6 +154,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
return 0;
}
+static int spcp8x5_attach(struct usb_serial *serial)
+{
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int spcp8x5_port_probe(struct usb_serial_port *port)
{
const struct usb_device_id *id = usb_get_serial_data(port->serial);
@@ -477,6 +490,7 @@ static struct usb_serial_driver spcp8x5_device = {
.tiocmget = spcp8x5_tiocmget,
.tiocmset = spcp8x5_tiocmset,
.probe = spcp8x5_probe,
+ .attach = spcp8x5_attach,
.port_probe = spcp8x5_port_probe,
.port_remove = spcp8x5_port_remove,
};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 8db9d071d940..64b85b8dedf3 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -579,6 +579,13 @@ static int ti_startup(struct usb_serial *serial)
goto free_tdev;
}
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ status = -ENODEV;
+ goto free_tdev;
+ }
+
return 0;
free_tdev:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index af3c7eecff91..16cc18369111 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2109,6 +2109,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
+/* Reported-by George Cherian <george.cherian@cavium.com> */
+UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
+ "JMicron",
+ "JMS56x",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/*
* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79451f7ef1b7..062c205f0046 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
- const u8 bzero[16] = { 0 };
u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
size_t zero_padding;
@@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
- sg_set_buf(&sg[3], bzero, zero_padding);
+ sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
skcipher_request_set_tfm(req, tfm_cbc);
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index be1ee89ee917..36d75c367d22 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -27,6 +27,45 @@ static LIST_HEAD(parent_list);
static DEFINE_MUTEX(parent_list_lock);
static struct class_compat *mdev_bus_compat_class;
+static LIST_HEAD(mdev_list);
+static DEFINE_MUTEX(mdev_list_lock);
+
+struct device *mdev_parent_dev(struct mdev_device *mdev)
+{
+ return mdev->parent->dev;
+}
+EXPORT_SYMBOL(mdev_parent_dev);
+
+void *mdev_get_drvdata(struct mdev_device *mdev)
+{
+ return mdev->driver_data;
+}
+EXPORT_SYMBOL(mdev_get_drvdata);
+
+void mdev_set_drvdata(struct mdev_device *mdev, void *data)
+{
+ mdev->driver_data = data;
+}
+EXPORT_SYMBOL(mdev_set_drvdata);
+
+struct device *mdev_dev(struct mdev_device *mdev)
+{
+ return &mdev->dev;
+}
+EXPORT_SYMBOL(mdev_dev);
+
+struct mdev_device *mdev_from_dev(struct device *dev)
+{
+ return dev_is_mdev(dev) ? to_mdev_device(dev) : NULL;
+}
+EXPORT_SYMBOL(mdev_from_dev);
+
+uuid_le mdev_uuid(struct mdev_device *mdev)
+{
+ return mdev->uuid;
+}
+EXPORT_SYMBOL(mdev_uuid);
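The accessors exported above let a vendor driver treat struct mdev_device as opaque. A hypothetical vendor driver's create/remove callbacks built only on those accessors might look like the sketch below; the demo_* names and private state are invented, and the callbacks would be wired up through struct mdev_parent_ops and mdev_register_device(), as the later hunks require:

#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/slab.h>
#include <linux/uuid.h>

struct demo_state {
	uuid_le uuid;
};

/* Hypothetical mdev_parent_ops->create callback. */
static int demo_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct demo_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->uuid = mdev_uuid(mdev);
	dev_info(mdev_dev(mdev), "created under parent %s\n",
		 dev_name(mdev_parent_dev(mdev)));

	mdev_set_drvdata(mdev, state);	/* stash per-instance state */
	return 0;
}

/* Hypothetical mdev_parent_ops->remove callback. */
static int demo_remove(struct mdev_device *mdev)
{
	struct demo_state *state = mdev_get_drvdata(mdev);

	kfree(state);
	return 0;
}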
+
static int _find_mdev_device(struct device *dev, void *data)
{
struct mdev_device *mdev;
@@ -42,7 +81,7 @@ static int _find_mdev_device(struct device *dev, void *data)
return 0;
}
-static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
+static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid)
{
struct device *dev;
@@ -56,9 +95,9 @@ static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
}
/* Should be called holding parent_list_lock */
-static struct parent_device *__find_parent_device(struct device *dev)
+static struct mdev_parent *__find_parent_device(struct device *dev)
{
- struct parent_device *parent;
+ struct mdev_parent *parent;
list_for_each_entry(parent, &parent_list, next) {
if (parent->dev == dev)
@@ -69,8 +108,8 @@ static struct parent_device *__find_parent_device(struct device *dev)
static void mdev_release_parent(struct kref *kref)
{
- struct parent_device *parent = container_of(kref, struct parent_device,
- ref);
+ struct mdev_parent *parent = container_of(kref, struct mdev_parent,
+ ref);
struct device *dev = parent->dev;
kfree(parent);
@@ -78,7 +117,7 @@ static void mdev_release_parent(struct kref *kref)
}
static
-inline struct parent_device *mdev_get_parent(struct parent_device *parent)
+inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
{
if (parent)
kref_get(&parent->ref);
@@ -86,7 +125,7 @@ inline struct parent_device *mdev_get_parent(struct parent_device *parent)
return parent;
}
-static inline void mdev_put_parent(struct parent_device *parent)
+static inline void mdev_put_parent(struct mdev_parent *parent)
{
if (parent)
kref_put(&parent->ref, mdev_release_parent);
@@ -95,7 +134,7 @@ static inline void mdev_put_parent(struct parent_device *parent)
static int mdev_device_create_ops(struct kobject *kobj,
struct mdev_device *mdev)
{
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
int ret;
ret = parent->ops->create(kobj, mdev);
@@ -122,7 +161,7 @@ static int mdev_device_create_ops(struct kobject *kobj,
*/
static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
{
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
int ret;
/*
@@ -153,10 +192,10 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
* Add device to list of registered parent devices.
* Returns a negative value on error, otherwise 0.
*/
-int mdev_register_device(struct device *dev, const struct parent_ops *ops)
+int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
{
int ret;
- struct parent_device *parent;
+ struct mdev_parent *parent;
/* check for mandatory ops */
if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups)
@@ -229,7 +268,7 @@ EXPORT_SYMBOL(mdev_register_device);
void mdev_unregister_device(struct device *dev)
{
- struct parent_device *parent;
+ struct mdev_parent *parent;
bool force_remove = true;
mutex_lock(&parent_list_lock);
@@ -266,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
{
int ret;
struct mdev_device *mdev;
- struct parent_device *parent;
+ struct mdev_parent *parent;
struct mdev_type *type = to_mdev_type(kobj);
parent = mdev_get_parent(type->parent);
@@ -316,6 +355,11 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
dev_dbg(&mdev->dev, "MDEV: created\n");
mutex_unlock(&parent->lock);
+
+ mutex_lock(&mdev_list_lock);
+ list_add(&mdev->next, &mdev_list);
+ mutex_unlock(&mdev_list_lock);
+
return ret;
create_failed:
@@ -329,12 +373,30 @@ create_err:
int mdev_device_remove(struct device *dev, bool force_remove)
{
- struct mdev_device *mdev;
- struct parent_device *parent;
+ struct mdev_device *mdev, *tmp;
+ struct mdev_parent *parent;
struct mdev_type *type;
int ret;
+ bool found = false;
mdev = to_mdev_device(dev);
+
+ mutex_lock(&mdev_list_lock);
+ list_for_each_entry(tmp, &mdev_list, next) {
+ if (tmp == mdev) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ list_del(&mdev->next);
+
+ mutex_unlock(&mdev_list_lock);
+
+ if (!found)
+ return -ENODEV;
+
type = to_mdev_type(mdev->type_kobj);
parent = mdev->parent;
mutex_lock(&parent->lock);
@@ -342,6 +404,11 @@ int mdev_device_remove(struct device *dev, bool force_remove)
ret = mdev_device_remove_ops(mdev, force_remove);
if (ret) {
mutex_unlock(&parent->lock);
+
+ mutex_lock(&mdev_list_lock);
+ list_add(&mdev->next, &mdev_list);
+ mutex_unlock(&mdev_list_lock);
+
return ret;
}
@@ -349,7 +416,8 @@ int mdev_device_remove(struct device *dev, bool force_remove)
device_unregister(dev);
mutex_unlock(&parent->lock);
mdev_put_parent(parent);
- return ret;
+
+ return 0;
}
static int __init mdev_init(void)
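
The accessors exported above let vendor drivers stop dereferencing struct mdev_device directly now that its definition moves into mdev_private.h. A hedged sketch of how a hypothetical vendor driver's create/remove callbacks might use them (the my_mdev_* names and state layout are made up for illustration):

#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/slab.h>
#include <linux/uuid.h>

struct my_mdev_state {                  /* hypothetical per-device state */
        uuid_le uuid;
};

/* Hypothetical vendor .create callback using only the new accessors. */
static int my_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct my_mdev_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;
        s->uuid = mdev_uuid(mdev);
        mdev_set_drvdata(mdev, s);
        dev_info(mdev_dev(mdev), "created under parent %s\n",
                 dev_name(mdev_parent_dev(mdev)));
        return 0;
}

/* Matching hypothetical .remove callback. */
static int my_mdev_remove(struct mdev_device *mdev)
{
        kfree(mdev_get_drvdata(mdev));
        return 0;
}
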
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index d35097cbf3d7..a9cefd70a705 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -16,10 +16,33 @@
int mdev_bus_register(void);
void mdev_bus_unregister(void);
+struct mdev_parent {
+ struct device *dev;
+ const struct mdev_parent_ops *ops;
+ struct kref ref;
+ struct mutex lock;
+ struct list_head next;
+ struct kset *mdev_types_kset;
+ struct list_head type_list;
+};
+
+struct mdev_device {
+ struct device dev;
+ struct mdev_parent *parent;
+ uuid_le uuid;
+ void *driver_data;
+ struct kref ref;
+ struct list_head next;
+ struct kobject *type_kobj;
+};
+
+#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
+#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
+
struct mdev_type {
struct kobject kobj;
struct kobject *devices_kobj;
- struct parent_device *parent;
+ struct mdev_parent *parent;
struct list_head next;
struct attribute_group *group;
};
@@ -29,8 +52,8 @@ struct mdev_type {
#define to_mdev_type(_kobj) \
container_of(_kobj, struct mdev_type, kobj)
-int parent_create_sysfs_files(struct parent_device *parent);
-void parent_remove_sysfs_files(struct parent_device *parent);
+int parent_create_sysfs_files(struct mdev_parent *parent);
+void parent_remove_sysfs_files(struct mdev_parent *parent);
int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 1a53deb2ee10..802df210929b 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -92,7 +92,7 @@ static struct kobj_type mdev_type_ktype = {
.release = mdev_type_release,
};
-struct mdev_type *add_mdev_supported_type(struct parent_device *parent,
+struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
struct attribute_group *group)
{
struct mdev_type *type;
@@ -158,7 +158,7 @@ static void remove_mdev_supported_type(struct mdev_type *type)
kobject_put(&type->kobj);
}
-static int add_mdev_supported_type_groups(struct parent_device *parent)
+static int add_mdev_supported_type_groups(struct mdev_parent *parent)
{
int i;
@@ -183,7 +183,7 @@ static int add_mdev_supported_type_groups(struct parent_device *parent)
}
/* mdev sysfs functions */
-void parent_remove_sysfs_files(struct parent_device *parent)
+void parent_remove_sysfs_files(struct mdev_parent *parent)
{
struct mdev_type *type, *tmp;
@@ -196,7 +196,7 @@ void parent_remove_sysfs_files(struct parent_device *parent)
kset_unregister(parent->mdev_types_kset);
}
-int parent_create_sysfs_files(struct parent_device *parent)
+int parent_create_sysfs_files(struct mdev_parent *parent)
{
int ret;
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index ffc36758cb84..fa848a701b8b 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -27,7 +27,7 @@
static int vfio_mdev_open(void *device_data)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
int ret;
if (unlikely(!parent->ops->open))
@@ -46,7 +46,7 @@ static int vfio_mdev_open(void *device_data)
static void vfio_mdev_release(void *device_data)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (likely(parent->ops->release))
parent->ops->release(mdev);
@@ -58,7 +58,7 @@ static long vfio_mdev_unlocked_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->ioctl))
return -EINVAL;
@@ -70,7 +70,7 @@ static ssize_t vfio_mdev_read(void *device_data, char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->read))
return -EINVAL;
@@ -82,7 +82,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
size_t count, loff_t *ppos)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->write))
return -EINVAL;
@@ -93,7 +93,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
{
struct mdev_device *mdev = device_data;
- struct parent_device *parent = mdev->parent;
+ struct mdev_parent *parent = mdev->parent;
if (unlikely(!parent->ops->mmap))
return -EINVAL;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index dcd7c2a99618..324c52e3a1a4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1142,6 +1142,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
return ret;
vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ if (!vdev->barmap[index]) {
+ pci_release_selected_regions(pdev, 1 << index);
+ return -ENOMEM;
+ }
}
vma->vm_private_data = vdev;
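
The vfio_pci_mmap hunk closes a small leak: if pci_iomap() returns NULL, the BAR regions requested just before must be released before bailing out. The same pattern in isolation, as a hedged sketch with a hypothetical driver name:

#include <linux/pci.h>

/* Hypothetical: claim and map a single BAR, undoing the claim on failure. */
static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
{
        void __iomem *map;

        if (pci_request_selected_regions(pdev, 1 << bar, "demo-driver"))
                return NULL;

        map = pci_iomap(pdev, bar, 0);
        if (!map)
                pci_release_selected_regions(pdev, 1 << bar);

        return map;
}
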
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 5ffd1d9ad4bd..357243d76f10 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -193,7 +193,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
if (!vdev->has_vga)
return -EINVAL;
- switch (pos) {
+ if (pos > 0xbfffful)
+ return -EINVAL;
+
+ switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c8823578a1b2..128d10282d16 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data,
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f3726ba12aa6..b3cc33fa6d26 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,7 +36,6 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
-#include <linux/pid_namespace.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
@@ -268,28 +267,38 @@ static void vfio_lock_acct(struct task_struct *task, long npage)
{
struct vwork *vwork;
struct mm_struct *mm;
+ bool is_current;
if (!npage)
return;
- mm = get_task_mm(task);
+ is_current = (task->mm == current->mm);
+
+ mm = is_current ? task->mm : get_task_mm(task);
if (!mm)
- return; /* process exited or nothing to do */
+ return; /* process exited */
if (down_write_trylock(&mm->mmap_sem)) {
mm->locked_vm += npage;
up_write(&mm->mmap_sem);
- mmput(mm);
+ if (!is_current)
+ mmput(mm);
return;
}
+ if (is_current) {
+ mm = get_task_mm(task);
+ if (!mm)
+ return;
+ }
+
/*
* Couldn't get mmap_sem lock, so must setup to update
* mm->locked_vm later. If locked_vm were atomic, we
* wouldn't need this silliness
*/
vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
- if (!vwork) {
+ if (WARN_ON(!vwork)) {
mmput(mm);
return;
}
@@ -393,77 +402,71 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base)
{
- unsigned long limit;
- bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
- CAP_IPC_LOCK);
- struct mm_struct *mm;
- long ret, i = 0, lock_acct = 0;
+ unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool lock_cap = capable(CAP_IPC_LOCK);
+ long ret, pinned = 0, lock_acct = 0;
bool rsvd;
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
- mm = get_task_mm(dma->task);
- if (!mm)
+ /* This code path is only user initiated */
+ if (!current->mm)
return -ENODEV;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+ ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
if (ret)
- goto pin_pg_remote_exit;
+ return ret;
+ pinned++;
rsvd = is_invalid_reserved_pfn(*pfn_base);
- limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
/*
* Reserved pages aren't counted against the user, externally pinned
* pages are already counted against the user.
*/
if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!lock_cap && mm->locked_vm + 1 > limit) {
+ if (!lock_cap && current->mm->locked_vm + 1 > limit) {
put_pfn(*pfn_base, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
limit << PAGE_SHIFT);
- ret = -ENOMEM;
- goto pin_pg_remote_exit;
+ return -ENOMEM;
}
lock_acct++;
}
- i++;
- if (likely(!disable_hugepages)) {
- /* Lock all the consecutive pages from pfn_base */
- for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
- i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- unsigned long pfn = 0;
+ if (unlikely(disable_hugepages))
+ goto out;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
- if (ret)
- break;
+ /* Lock all the consecutive pages from pfn_base */
+ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
+ pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+ unsigned long pfn = 0;
- if (pfn != *pfn_base + i ||
- rsvd != is_invalid_reserved_pfn(pfn)) {
+ ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+ if (ret)
+ break;
+
+ if (pfn != *pfn_base + pinned ||
+ rsvd != is_invalid_reserved_pfn(pfn)) {
+ put_pfn(pfn, dma->prot);
+ break;
+ }
+
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!lock_cap &&
+ current->mm->locked_vm + lock_acct + 1 > limit) {
put_pfn(pfn, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, limit << PAGE_SHIFT);
break;
}
-
- if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!lock_cap &&
- mm->locked_vm + lock_acct + 1 > limit) {
- put_pfn(pfn, dma->prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
- "exceeded\n", __func__,
- limit << PAGE_SHIFT);
- break;
- }
- lock_acct++;
- }
+ lock_acct++;
}
}
- vfio_lock_acct(dma->task, lock_acct);
- ret = i;
+out:
+ vfio_lock_acct(current, lock_acct);
-pin_pg_remote_exit:
- mmput(mm);
- return ret;
+ return pinned;
}
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
@@ -473,10 +476,10 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
long unlocked = 0, locked = 0;
long i;
- for (i = 0; i < npage; i++) {
+ for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
if (put_pfn(pfn++, dma->prot)) {
unlocked++;
- if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
+ if (vfio_find_vpfn(dma, iova))
locked++;
}
}
@@ -491,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
unsigned long limit;
- bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
- CAP_IPC_LOCK);
+ bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
struct mm_struct *mm;
int ret;
bool rsvd;
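
The type1 rework pins user memory against current->mm and charges mm->locked_vm directly, so the limit test reduces to rlimit(RLIMIT_MEMLOCK) unless the caller holds CAP_IPC_LOCK. That core check, pulled out into a hedged standalone sketch:

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical: may the current task lock 'npages' additional pages? */
static bool demo_may_lock_pages(long npages)
{
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        if (capable(CAP_IPC_LOCK))
                return true;

        return current->mm && current->mm->locked_vm + npages <= limit;
}
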
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310cdaaca..fd6c8b66f06f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter out_iter, in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- unsigned out, in;
+ unsigned int out = 0, in = 0;
int head, ret, prot_bytes;
size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bbbf588540ed..ce5e63d2c66a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
+ struct vhost_virtqueue *vq;
size_t i;
int ret;
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
- mutex_unlock(&vq->mutex);
goto err_vq;
}
if (!vq->private_data) {
vq->private_data = vsock;
- vhost_vq_init_access(vq);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->private_data = NULL;
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 2d3b691f3fc4..038ac6934fe9 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
info->screen_size = resource_size(res);
info->screen_base = devm_ioremap(&dev->dev, res->start,
info->screen_size);
+ if (!info->screen_base) {
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
+
info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
info->fix.smem_start = res->start;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e..68a113594808 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
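
Both fbcmap hunks replace signed length arithmetic, which could go negative and then be truncated, with explicit offset checks and an unsigned min. The shape of that fix, reduced to a hedged standalone sketch with hypothetical range parameters:

#include <linux/errno.h>
#include <linux/kernel.h>

/*
 * Hypothetical: number of u16 entries that overlap between a source range
 * (sstart, slen) and a destination range (dstart, dlen), or -EINVAL.
 */
static ssize_t demo_overlap_entries(u32 sstart, u32 slen, u32 dstart, u32 dlen)
{
        u32 soff = 0, doff = 0;

        if (dstart > sstart)
                soff = dstart - sstart;
        else
                doff = sstart - dstart;

        if (soff >= slen || doff >= dlen)
                return -EINVAL;

        return min_t(size_t, slen - soff, dlen - doff);
}
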
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fcef818..c71fde5fe835 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
#define pr_fmt(fmt) "virtio-mmio: " fmt
#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
struct virtio_mmio_device *vm_dev;
struct resource *mem;
unsigned long magic;
+ int rc;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
- if (vm_dev->version == 1)
+ if (vm_dev->version == 1) {
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ /*
+ * In the legacy case, ensure our coherently-allocated virtio
+	 * ring will be at an address expressible as a 32-bit PFN.
+ */
+ if (!rc)
+ dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32 + PAGE_SHIFT));
+ } else {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ }
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
platform_set_drvdata(pdev, vm_dev);
return register_virtio_device(&vm_dev->vdev);
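
The probe change follows the usual DMA mask negotiation: prefer 64-bit, fall back to 32-bit, and treat total failure as a warning rather than a probe error (legacy devices additionally cap the coherent mask so the ring PFN fits in 32 bits). The generic fallback ladder, as a hedged sketch for an arbitrary platform device:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Hypothetical helper: negotiate the widest DMA mask the platform allows. */
static void demo_setup_dma(struct platform_device *pdev)
{
        int rc;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc)
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                dev_warn(&pdev->dev,
                         "no usable DMA mask, continuing without DMA guarantees\n");
}
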
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 409aeaa49246..7e38ed79c3fc 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
if (xen_domain())
return true;
+ /*
+ * On ARM-based machines, the DMA ops will do the right thing,
+ * so always use them with legacy devices.
+ */
+ if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+ return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
return false;
}
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 6b5ee896af63..7cc51223db1c 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
diff --git a/drivers/xen/arm-device.c b/drivers/xen/arm-device.c
index 778acf80aacb..85dd20e05726 100644
--- a/drivers/xen/arm-device.c
+++ b/drivers/xen/arm-device.c
@@ -58,9 +58,13 @@ static int xen_map_device_mmio(const struct resource *resources,
xen_pfn_t *gpfns;
xen_ulong_t *idxs;
int *errs;
- struct xen_add_to_physmap_range xatp;
for (i = 0; i < count; i++) {
+ struct xen_add_to_physmap_range xatp = {
+ .domid = DOMID_SELF,
+ .space = XENMAPSPACE_dev_mmio
+ };
+
r = &resources[i];
nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
@@ -87,9 +91,7 @@ static int xen_map_device_mmio(const struct resource *resources,
idxs[j] = XEN_PFN_DOWN(r->start) + j;
}
- xatp.domid = DOMID_SELF;
xatp.size = nr;
- xatp.space = XENMAPSPACE_dev_mmio;
set_xen_guest_handle(xatp.gpfns, gpfns);
set_xen_guest_handle(xatp.idxs, idxs);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index c03f9c86c7e3..3c41470c7fc4 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -369,8 +369,7 @@ static void evtchn_fifo_resume(void)
}
ret = init_control_block(cpu, control_block);
- if (ret < 0)
- BUG();
+ BUG_ON(ret < 0);
}
/*
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index e8c7f09d01be..6890897a6f30 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -125,7 +125,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
while (*new) {
struct user_evtchn *this;
- this = container_of(*new, struct user_evtchn, node);
+ this = rb_entry(*new, struct user_evtchn, node);
parent = *new;
if (this->port < evtchn->port)
@@ -157,7 +157,7 @@ static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
while (node) {
struct user_evtchn *evtchn;
- evtchn = container_of(node, struct user_evtchn, node);
+ evtchn = rb_entry(node, struct user_evtchn, node);
if (evtchn->port < port)
node = node->rb_left;
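
The evtchn change is cosmetic: rb_entry() is the rbtree-flavoured wrapper around container_of(), so tree walks read idiomatically. For reference, a hedged sketch of a standard lookup over a hypothetical keyed node:

#include <linux/rbtree.h>

struct demo_node {                      /* hypothetical keyed node */
        struct rb_node node;
        unsigned int key;
};

static struct demo_node *demo_find(struct rb_root *root, unsigned int key)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct demo_node *d = rb_entry(n, struct demo_node, node);

                if (key < d->key)
                        n = n->rb_left;
                else if (key > d->key)
                        n = n->rb_right;
                else
                        return d;
        }
        return NULL;
}
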
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
+static uint64_t callback_via;
static unsigned long alloc_xen_mmio(unsigned long len)
{
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
return addr;
}
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+ u8 pin;
+ int irq;
+
+ irq = pdev->irq;
+ if (irq < 16)
+ return irq; /* ISA IRQ */
+
+ pin = pdev->pin;
+
+ /* We don't know the GSI. Specify the PCI INTx line instead. */
+ return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+ ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+ ((uint64_t)pdev->bus->number << 16) |
+ ((uint64_t)(pdev->devfn & 0xff) << 8) |
+ ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+ xen_hvm_evtchn_do_upcall();
+ return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+ return request_irq(pdev->irq, do_hvm_evtchn_intr,
+ IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+ int err;
+ if (!xen_pv_domain())
+ return 0;
+ err = xen_set_callback_via(callback_via);
+ if (err) {
+ dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ return err;
+ }
+ return 0;
+}
+
static int platform_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
+ /*
+ * Xen HVM guests always use the vector callback mechanism.
+	 * L1 Dom0 in a nested Xen environment is a PV guest inside an
+ * HVM environment. It needs the platform-pci driver to get
+ * notifications from L0 Xen, but it cannot use the vector callback
+ * as it is not exported by L1 Xen.
+ */
+ if (xen_pv_domain()) {
+ ret = xen_allocate_irq(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+ goto out;
+ }
+ callback_via = get_callback_via(pdev);
+ ret = xen_set_callback_via(callback_via);
+ if (ret) {
+ dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+ "err=%d\n", ret);
+ goto out;
+ }
+ }
+
max_nr_gframes = gnttab_max_grant_frames();
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
+#ifdef CONFIG_PM
+ .resume_early = platform_pci_resume,
+#endif
};
builtin_pci_driver(platform_driver);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 478fb91e3df2..f8afc6dcc29f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -275,6 +275,10 @@ retry:
rc = 0;
} else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+
+ if (!rc)
+ swiotlb_set_max_segment(PAGE_SIZE);
+
return rc;
error:
if (repeat--) {
@@ -392,7 +396,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
- !swiotlb_force) {
+ (swiotlb_force != SWIOTLB_FORCE)) {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
* by the function. */
@@ -410,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -552,7 +556,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
phys_addr_t paddr = sg_phys(sg);
dma_addr_t dev_addr = xen_phys_to_bus(paddr);
- if (swiotlb_force ||
+ if (swiotlb_force == SWIOTLB_FORCE ||
xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
!dma_capable(hwdev, dev_addr, sg->length) ||
range_straddles_page_boundary(paddr, sg->length)) {
@@ -571,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index e74f9c1fbd80..867a2e425208 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -42,7 +42,6 @@ int xb_write(const void *data, unsigned len);
int xb_read(void *data, unsigned len);
int xb_data_to_read(void);
int xb_wait_for_data_to_read(void);
-int xs_input_avail(void);
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;
extern enum xenstore_init xen_store_domain_type;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 6c0ead4be784..79130b310247 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -302,6 +302,29 @@ static void watch_fired(struct xenbus_watch *watch,
mutex_unlock(&adap->dev_data->reply_mutex);
}
+static int xenbus_command_reply(struct xenbus_file_priv *u,
+ unsigned int msg_type, const char *reply)
+{
+ struct {
+ struct xsd_sockmsg hdr;
+ const char body[16];
+ } msg;
+ int rc;
+
+ msg.hdr = u->u.msg;
+ msg.hdr.type = msg_type;
+ msg.hdr.len = strlen(reply) + 1;
+ if (msg.hdr.len > sizeof(msg.body))
+ return -E2BIG;
+
+ mutex_lock(&u->reply_mutex);
+ rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
+ wake_up(&u->read_waitq);
+ mutex_unlock(&u->reply_mutex);
+
+ return rc;
+}
+
static int xenbus_write_transaction(unsigned msg_type,
struct xenbus_file_priv *u)
{
@@ -316,12 +339,12 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
- } else if (msg_type == XS_TRANSACTION_END) {
+ } else if (u->u.msg.tx_id != 0) {
list_for_each_entry(trans, &u->transactions, list)
if (trans->handle.id == u->u.msg.tx_id)
break;
if (&trans->list == &u->transactions)
- return -ESRCH;
+ return xenbus_command_reply(u, XS_ERROR, "ENOENT");
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
@@ -372,12 +395,12 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
if (token == NULL) {
- rc = -EILSEQ;
+ rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
goto out;
}
token++;
if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
- rc = -EILSEQ;
+ rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
goto out;
}
@@ -411,23 +434,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
}
/* Success. Synthesize a reply to say all is OK. */
- {
- struct {
- struct xsd_sockmsg hdr;
- char body[3];
- } __packed reply = {
- {
- .type = msg_type,
- .len = sizeof(reply.body)
- },
- "OK"
- };
-
- mutex_lock(&u->reply_mutex);
- rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
- wake_up(&u->read_waitq);
- mutex_unlock(&u->reply_mutex);
- }
+ rc = xenbus_command_reply(u, msg_type, "OK");
out:
return rc;
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377cdda2b..83eab52fb3f6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
bool "Direct Access (DAX) support"
depends on MMU
depends on !(ARM || MIPS || SPARC)
+ select FS_IOMAP
help
Direct Access (DAX) can be used on memory-backed block devices.
If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/aio.c b/fs/aio.c
index 4ab67e8cb776..873b4ca82ccb 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
* Tell lockdep we inherited freeze protection from submission
* thread.
*/
- __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ if (S_ISREG(file_inode(file)->i_mode))
+ __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
file_end_write(file);
}
@@ -1525,7 +1526,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
* by telling it the lock got released so that it doesn't
* complain about held lock when we return to userspace.
*/
- __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ if (S_ISREG(file_inode(file)->i_mode))
+ __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
kfree(iovec);
return ret;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 29a02daf08a9..422370293cfd 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2298,6 +2298,7 @@ static int elf_core_dump(struct coredump_params *cprm)
goto end_coredump;
}
}
+ dump_truncate(cprm);
if (!elf_core_write_extra_data(cprm))
goto end_coredump;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6254cee8f8f3..3c47614a4b32 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -328,9 +328,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct file *file = iocb->ki_filp;
struct inode *inode = bdev_file_inode(file);
struct block_device *bdev = I_BDEV(inode);
+ struct blk_plug plug;
struct blkdev_dio *dio;
struct bio *bio;
- bool is_read = (iov_iter_rw(iter) == READ);
+ bool is_read = (iov_iter_rw(iter) == READ), is_sync;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
int ret;
@@ -343,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio_get(bio); /* extra ref for the completion handler */
dio = container_of(bio, struct blkdev_dio, bio);
- dio->is_sync = is_sync_kiocb(iocb);
+ dio->is_sync = is_sync = is_sync_kiocb(iocb);
if (dio->is_sync)
dio->waiter = current;
else
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
dio->multi_bio = false;
dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
+ blk_start_plug(&plug);
for (;;) {
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = pos >> 9;
@@ -394,8 +396,9 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
submit_bio(bio);
bio = bio_alloc(GFP_KERNEL, nr_pages);
}
+ blk_finish_plug(&plug);
- if (!dio->is_sync)
+ if (!is_sync)
return -EIOCBQUEUED;
for (;;) {
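
The __blkdev_direct_IO change does two things: it caches is_sync before submission (the dio may already be freed by its completion handler when the last bio finishes) and it brackets the submission loop with a block plug so consecutive bios can be merged. The plug idiom on its own, hedged and with a hypothetical helper:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical: submit a batch of prepared bios under one plug. */
static void demo_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]);
        blk_finish_plug(&plug);         /* flush anything still batched */
}
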
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 63d197724519..ff0b0be92d61 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
unsigned long flags;
while (1) {
+ void *wtag;
+
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
spin_unlock_irqrestore(lock, flags);
/*
- * we don't want to call the ordered free functions
- * with the lock held though
+ * We don't want to call the ordered free functions with the
+ * lock held though. Save the work as tag for the trace event,
+ * because the callback could free the structure.
*/
+ wtag = work;
work->ordered_free(work);
- trace_btrfs_all_work_done(work);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
}
spin_unlock_irqrestore(lock, flags);
}
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
static void normal_work_helper(struct btrfs_work *work)
{
struct __btrfs_workqueue *wq;
+ void *wtag;
int need_order = 0;
/*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
+ /* Safe for tracepoints in case work gets freed by the callback */
+ wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
run_ordered_work(wq);
}
if (!need_order)
- trace_btrfs_all_work_done(work);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
}
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e97302f437a1..dcd2e798767e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
- btrfs_delayed_ref_unlock(locked_ref);
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
cond_resched();
count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
+ spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
btrfs_debug(fs_info,
"run_delayed_extent_op returned %d",
ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
spin_unlock(&cluster->refill_lock);
- down_read(&used_bg->data_rwsem);
+ /* We should only have one-level nested. */
+ down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
spin_lock(&cluster->refill_lock);
if (used_bg == cluster->block_group)
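
down_read_nested(..., SINGLE_DEPTH_NESTING) only changes lockdep's view: it declares that taking a second lock of the same class is intentional and bounded to one level, so no false deadlock is reported. A hedged sketch of the annotation on a hypothetical structure:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct demo_group {                     /* hypothetical */
        struct rw_semaphore rwsem;
};

/* Hold two groups of the same lock class for read, lockdep-annotated. */
static void demo_lock_pair(struct demo_group *a, struct demo_group *b)
{
        down_read(&a->rwsem);
        down_read_nested(&b->rwsem, SINGLE_DEPTH_NESTING);

        /* ... inspect both groups ... */

        up_read(&b->rwsem);
        up_read(&a->rwsem);
}
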
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f2b281ad7af6..1e861a063721 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3835,10 +3835,7 @@ cache_acl:
break;
case S_IFDIR:
inode->i_fop = &btrfs_dir_file_operations;
- if (root == fs_info->tree_root)
- inode->i_op = &btrfs_dir_ro_inode_operations;
- else
- inode->i_op = &btrfs_dir_inode_operations;
+ inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
if (found_type > min_type) {
del_item = 1;
} else {
- if (item_end < new_size)
+ if (item_end < new_size) {
+ /*
+ * With NO_HOLES mode, for the following mapping
+ *
+ * [0-4k][hole][8k-12k]
+ *
+ * if truncating isize down to 6k, it ends up
+ * isize being 8k.
+ */
+ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ last_size = new_size;
break;
+ }
if (found_key.offset >= new_size)
del_item = 1;
else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
inode->i_op = &btrfs_dir_ro_inode_operations;
+ inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode->i_mtime = current_time(inode);
@@ -7059,7 +7068,7 @@ insert:
write_unlock(&em_tree->lock);
out:
- trace_btrfs_get_extent(root, em);
+ trace_btrfs_get_extent(root, inode, em);
btrfs_free_path(path);
if (trans) {
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
struct extent_map *em = NULL;
int ret;
- down_read(&BTRFS_I(inode)->dio_sem);
if (type != BTRFS_ORDERED_NOCOW) {
em = create_pinned_em(inode, start, len, orig_start,
block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
em = ERR_PTR(ret);
}
out:
- up_read(&BTRFS_I(inode)->dio_sem);
return em;
}
@@ -7623,11 +7630,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
* within our reservation, otherwise we need to adjust our inode
* counter appropriately.
*/
- if (dio_data->outstanding_extents) {
+ if (dio_data->outstanding_extents >= num_extents) {
dio_data->outstanding_extents -= num_extents;
} else {
+ /*
+		 * If the dio write length has been split because there is not
+		 * enough contiguous space, we need to compensate our inode counter
+ * appropriately.
+ */
+ u64 num_needed = num_extents - dio_data->outstanding_extents;
+
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents += num_extents;
+ BTRFS_I(inode)->outstanding_extents += num_needed;
spin_unlock(&BTRFS_I(inode)->lock);
}
}
@@ -8685,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
dio_data.unsubmitted_oe_range_start = (u64)offset;
dio_data.unsubmitted_oe_range_end = (u64)offset;
current->journal_info = &dio_data;
+ down_read(&BTRFS_I(inode)->dio_sem);
} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags)) {
inode_dio_end(inode);
@@ -8697,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
+ up_read(&BTRFS_I(inode)->dio_sem);
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED) {
if (dio_data.reserve)
@@ -9205,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
break;
}
+ btrfs_block_rsv_release(fs_info, rsv, -1);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
@@ -10572,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
- .get_acl = btrfs_get_acl,
- .set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f10bf5213ed8..eeffff84f280 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -37,6 +37,7 @@
*/
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
+#define LOG_OTHER_INODE 2
/*
* directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (S_ISDIR(inode->i_mode) ||
(!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags) &&
- inode_only == LOG_INODE_EXISTS))
+ inode_only >= LOG_INODE_EXISTS))
max_key.type = BTRFS_XATTR_ITEM_KEY;
else
max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
return ret;
}
- mutex_lock(&BTRFS_I(inode)->log_mutex);
+ if (inode_only == LOG_OTHER_INODE) {
+ inode_only = LOG_INODE_EXISTS;
+ mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
+ SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&BTRFS_I(inode)->log_mutex);
+ }
/*
* a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ again:
* unpin it.
*/
err = btrfs_log_inode(trans, root, other_inode,
- LOG_INODE_EXISTS,
+ LOG_OTHER_INODE,
0, LLONG_MAX, ctx);
iput(other_inode);
if (err)
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 161342b73ce5..726f928238d0 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -352,7 +352,5 @@ skip:
out:
btrfs_free_path(path);
- if (ret)
- btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
- return 0;
+ return ret;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index d21771fcf7d3..0e87401cf335 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
head = page_buffers(page);
bh = head;
do {
- if (!buffer_mapped(bh))
+ if (!buffer_mapped(bh) || (bh->b_blocknr < block))
goto next;
if (bh->b_blocknr >= block + len)
break;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9cd0c0ea7cdb..e4b066cd912a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -502,9 +502,9 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
if (truncate_size)
- *truncate_size = capsnap->truncate_size;
+ *truncate_size = ci->i_truncate_size;
if (truncate_seq)
- *truncate_seq = capsnap->truncate_seq;
+ *truncate_seq = ci->i_truncate_seq;
}
spin_unlock(&ci->i_ceph_lock);
return snapc;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index baea866a6751..94fd76d04683 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
add_wait_queue(&ci->i_cap_wq, &wait);
while (!try_get_cap_refs(ci, need, want, endoff,
- true, &_got, &err))
+ true, &_got, &err)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
remove_wait_queue(&ci->i_cap_wq, &wait);
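
The caps.c hunk makes the wait interruptible: wait_woken() with TASK_INTERRUPTIBLE returns when a signal is delivered, so the loop has to notice signal_pending() and bail out with -ERESTARTSYS instead of spinning. The same loop shape as a hedged, self-contained sketch:

#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical: wait until *flag becomes true, or a signal arrives. */
static int demo_wait_for_flag(wait_queue_head_t *wq, bool *flag)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int ret = 0;

        add_wait_queue(wq, &wait);
        while (!READ_ONCE(*flag)) {
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(wq, &wait);
        return ret;
}
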
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d7a93696663b..8ab1fdf0bd49 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct ceph_mds_client *mdsc =
ceph_sb_to_client(dir->i_sb)->mdsc;
struct ceph_mds_request *req;
- int op, mask, err;
+ int op, err;
+ u32 mask;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
mask |= CEPH_CAP_XATTR_SHARED;
- req->r_args.getattr.mask = mask;
+ req->r_args.getattr.mask = cpu_to_le32(mask);
err = ceph_mdsc_do_request(mdsc, NULL, req);
switch (err) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 398e5328b309..5e659d054b40 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
{
struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
- return ceph_frag_compare(ls->frag, rs->frag);
+ return ceph_frag_compare(le32_to_cpu(ls->frag),
+ le32_to_cpu(rs->frag));
}
static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f49253387a0..c9d2e553a6c4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
- if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
+ u32 op = le32_to_cpu(info->head->op);
+
+ if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
- else if (info->head->op == CEPH_MDS_OP_READDIR ||
- info->head->op == CEPH_MDS_OP_LSSNAP)
+ else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
return parse_reply_info_dir(p, end, info, features);
- else if (info->head->op == CEPH_MDS_OP_CREATE)
+ else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features);
else
return -EIO;
@@ -2106,6 +2107,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
dout("do_request mdsmap err %d\n", err);
goto finish;
}
+ if (mdsc->mdsmap->m_epoch == 0) {
+ dout("do_request no mdsmap, waiting for map\n");
+ list_add(&req->r_wait, &mdsc->waiting_for_map);
+ goto finish;
+ }
if (!(mdsc->fsc->mount_options->flags &
CEPH_MOUNT_OPT_MOUNTWAIT) &&
!ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
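
Several of the ceph hunks are endianness fixes: on-wire fields are declared __le32 and must go through le32_to_cpu()/cpu_to_le32() before being compared or stored in native-endian variables (sparse flags the mismatch once the annotations are honest). A minimal hedged sketch with a hypothetical wire header and opcode:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_hdr {                  /* hypothetical on-wire header */
        __le32 op;
        __le32 len;
};

static bool demo_is_lookup(const struct demo_wire_hdr *hdr)
{
        u32 op = le32_to_cpu(hdr->op); /* convert before comparing */

        return op == 0x100;            /* hypothetical LOOKUP opcode */
}
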
diff --git a/fs/coredump.c b/fs/coredump.c
index e525b6017cdf..ae6b05629ca1 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align)
return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that file size is big enough to contain the current file
+ * position. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+ struct file *file = cprm->file;
+ loff_t offset;
+
+ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+ offset = file->f_op->llseek(file, 0, SEEK_CUR);
+ if (i_size_read(file->f_mapping->host) < offset)
+ do_truncate(file->f_path.dentry, offset, 0, file);
+ }
+}
+EXPORT_SYMBOL(dump_truncate);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 6eeea1dcba41..95cd4c3b06c3 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -248,7 +248,8 @@ retry:
goto out;
if (fscrypt_dummy_context_enabled(inode)) {
- memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+ memset(raw_key, 0x42, keysize/2);
+ memset(raw_key+keysize/2, 0x24, keysize - (keysize/2));
goto got_key;
}
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 6ed7c2eebeec..d6cd7ea4851d 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -179,6 +179,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
BUG_ON(1);
}
+ /* No restrictions on file types which are never encrypted */
+ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
+ !S_ISLNK(child->i_mode))
+ return 1;
+
/* no restrictions if the parent directory is not encrypted */
if (!parent->i_sb->s_cop->is_encrypted(parent))
return 1;
diff --git a/fs/dax.c b/fs/dax.c
index a8732fbed381..3af2da5e64ce 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
+static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+ pgoff_t index, bool trunc)
+{
+ int ret = 0;
+ void *entry;
+ struct radix_tree_root *page_tree = &mapping->page_tree;
+
+ spin_lock_irq(&mapping->tree_lock);
+ entry = get_unlocked_mapping_entry(mapping, index, NULL);
+ if (!entry || !radix_tree_exceptional_entry(entry))
+ goto out;
+ if (!trunc &&
+ (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+ radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
+ goto out;
+ radix_tree_delete(page_tree, index);
+ mapping->nrexceptional--;
+ ret = 1;
+out:
+ put_unlocked_mapping_entry(mapping, index, entry);
+ spin_unlock_irq(&mapping->tree_lock);
+ return ret;
+}
/*
* Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
* entry to get unlocked before deleting it.
*/
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
- void *entry;
+ int ret = __dax_invalidate_mapping_entry(mapping, index, true);
- spin_lock_irq(&mapping->tree_lock);
- entry = get_unlocked_mapping_entry(mapping, index, NULL);
/*
* This gets called from truncate / punch_hole path. As such, the caller
* must hold locks protecting against concurrent modifications of the
@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
* caller has seen exceptional entry for this index, we better find it
* at that index as well...
*/
- if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
- spin_unlock_irq(&mapping->tree_lock);
- return 0;
- }
- radix_tree_delete(&mapping->page_tree, index);
+ WARN_ON_ONCE(!ret);
+ return ret;
+}
+
+/*
+ * Invalidate exceptional DAX entry if easily possible. This handles DAX
+ * entries for invalidate_inode_pages() so we evict the entry only if we can
+ * do so without blocking.
+ */
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+ int ret = 0;
+ void *entry, **slot;
+ struct radix_tree_root *page_tree = &mapping->page_tree;
+
+ spin_lock_irq(&mapping->tree_lock);
+ entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
+ if (!entry || !radix_tree_exceptional_entry(entry) ||
+ slot_locked(mapping, slot))
+ goto out;
+ if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+ radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+ goto out;
+ radix_tree_delete(page_tree, index);
mapping->nrexceptional--;
+ ret = 1;
+out:
spin_unlock_irq(&mapping->tree_lock);
- dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+ if (ret)
+ dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+ return ret;
+}
- return 1;
+/*
+ * Invalidate exceptional DAX entry if it is clean.
+ */
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __dax_invalidate_mapping_entry(mapping, index, false);
}
/*
@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
* otherwise it will simply fall out of the page cache under memory
* pressure without ever having been dirtied.
*/
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
struct vm_fault *vmf)
{
struct page *page;
+ int ret;
/* Hole page already exists? Return it... */
- if (!radix_tree_exceptional_entry(entry)) {
- vmf->page = entry;
- return VM_FAULT_LOCKED;
+ if (!radix_tree_exceptional_entry(*entry)) {
+ page = *entry;
+ goto out;
}
/* This will replace locked radix tree entry with a hole page */
@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
vmf->gfp_mask | __GFP_ZERO);
if (!page)
return VM_FAULT_OOM;
+ out:
vmf->page = page;
- return VM_FAULT_LOCKED;
+ ret = finish_fault(vmf);
+ vmf->page = NULL;
+ *entry = page;
+ if (!ret) {
+ /* Grab reference for PTE that is now referencing the page */
+ get_page(page);
+ return VM_FAULT_NOPAGE;
+ }
+ return ret;
}
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -630,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
pgoff_t index, unsigned long pfn)
{
struct vm_area_struct *vma;
- pte_t *ptep;
- pte_t pte;
+ pte_t pte, *ptep = NULL;
+ pmd_t *pmdp = NULL;
spinlock_t *ptl;
bool changed;
@@ -646,21 +707,42 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
address = pgoff_address(index, vma);
changed = false;
- if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+ if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
continue;
- if (pfn != pte_pfn(*ptep))
- goto unlock;
- if (!pte_dirty(*ptep) && !pte_write(*ptep))
- goto unlock;
- flush_cache_page(vma, address, pfn);
- pte = ptep_clear_flush(vma, address, ptep);
- pte = pte_wrprotect(pte);
- pte = pte_mkclean(pte);
- set_pte_at(vma->vm_mm, address, ptep, pte);
- changed = true;
-unlock:
- pte_unmap_unlock(ptep, ptl);
+ if (pmdp) {
+#ifdef CONFIG_FS_DAX_PMD
+ pmd_t pmd;
+
+ if (pfn != pmd_pfn(*pmdp))
+ goto unlock_pmd;
+ if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+ goto unlock_pmd;
+
+ flush_cache_page(vma, address, pfn);
+ pmd = pmdp_huge_clear_flush(vma, address, pmdp);
+ pmd = pmd_wrprotect(pmd);
+ pmd = pmd_mkclean(pmd);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ changed = true;
+unlock_pmd:
+ spin_unlock(ptl);
+#endif
+ } else {
+ if (pfn != pte_pfn(*ptep))
+ goto unlock_pte;
+ if (!pte_dirty(*ptep) && !pte_write(*ptep))
+ goto unlock_pte;
+
+ flush_cache_page(vma, address, pfn);
+ pte = ptep_clear_flush(vma, address, ptep);
+ pte = pte_wrprotect(pte);
+ pte = pte_mkclean(pte);
+ set_pte_at(vma->vm_mm, address, ptep, pte);
+ changed = true;
+unlock_pte:
+ pte_unmap_unlock(ptep, ptl);
+ }
if (changed)
mmu_notifier_invalidate_page(vma->vm_mm, address);
@@ -908,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
-#ifdef CONFIG_FS_IOMAP
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -934,6 +1015,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
return -EIO;
+ /*
+ * Write can allocate block for an area which has a hole page mapped
+ * into page tables. We have to tear down these mappings so that data
+ * written by write(2) is visible in mmap.
+ */
+ if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ (end - 1) >> PAGE_SHIFT);
+ }
+
while (pos < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
struct blk_dax_ctl dax = { 0 };
@@ -992,23 +1084,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE)
flags |= IOMAP_WRITE;
- /*
- * Yes, even DAX files can have page cache attached to them: A zeroed
- * page is inserted into the pagecache when we have to serve a write
- * fault on a hole. It should never be dirtied and can simply be
- * dropped from the pagecache once we get real data for the page.
- *
- * XXX: This is racy against mmap, and there's nothing we can do about
- * it. We'll eventually need to shift this down even further so that
- * we can check if we allocated blocks over a hole first.
- */
- if (mapping->nrpages) {
- ret = invalidate_inode_pages2_range(mapping,
- pos >> PAGE_SHIFT,
- (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
- WARN_ON_ONCE(ret);
- }
-
while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter, dax_iomap_actor);
@@ -1023,6 +1098,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
+static int dax_fault_return(int error)
+{
+ if (error == 0)
+ return VM_FAULT_NOPAGE;
+ if (error == -ENOMEM)
+ return VM_FAULT_OOM;
+ return VM_FAULT_SIGBUS;
+}
+
/**
* dax_iomap_fault - handle a page fault on a DAX file
* @vma: The virtual memory area where the fault occurred
@@ -1055,12 +1139,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (pos >= i_size_read(inode))
return VM_FAULT_SIGBUS;
- entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
- if (IS_ERR(entry)) {
- error = PTR_ERR(entry);
- goto out;
- }
-
if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
flags |= IOMAP_WRITE;
@@ -1071,9 +1149,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
*/
error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
if (error)
- goto unlock_entry;
+ return dax_fault_return(error);
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
- error = -EIO; /* fs corruption? */
+ vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
+ goto finish_iomap;
+ }
+
+ entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+ if (IS_ERR(entry)) {
+ vmf_ret = dax_fault_return(PTR_ERR(entry));
goto finish_iomap;
}
@@ -1096,13 +1180,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}
if (error)
- goto finish_iomap;
+ goto error_unlock_entry;
__SetPageUptodate(vmf->cow_page);
vmf_ret = finish_fault(vmf);
if (!vmf_ret)
vmf_ret = VM_FAULT_DONE_COW;
- goto finish_iomap;
+ goto unlock_entry;
}
switch (iomap.type) {
@@ -1114,12 +1198,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}
error = dax_insert_mapping(mapping, iomap.bdev, sector,
PAGE_SIZE, &entry, vma, vmf);
+ /* -EBUSY is fine, somebody else faulted on the same PTE */
+ if (error == -EBUSY)
+ error = 0;
break;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (!(vmf->flags & FAULT_FLAG_WRITE)) {
- vmf_ret = dax_load_hole(mapping, entry, vmf);
- break;
+ vmf_ret = dax_load_hole(mapping, &entry, vmf);
+ goto unlock_entry;
}
/*FALLTHRU*/
default:
@@ -1128,31 +1215,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
break;
}
+ error_unlock_entry:
+ vmf_ret = dax_fault_return(error) | major;
+ unlock_entry:
+ put_locked_mapping_entry(mapping, vmf->pgoff, entry);
finish_iomap:
if (ops->iomap_end) {
- if (error || (vmf_ret & VM_FAULT_ERROR)) {
- /* keep previous error */
- ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
- &iomap);
- } else {
- error = ops->iomap_end(inode, pos, PAGE_SIZE,
- PAGE_SIZE, flags, &iomap);
- }
- }
- unlock_entry:
- if (vmf_ret != VM_FAULT_LOCKED || error)
- put_locked_mapping_entry(mapping, vmf->pgoff, entry);
- out:
- if (error == -ENOMEM)
- return VM_FAULT_OOM | major;
- /* -EBUSY is fine, somebody else faulted on the same PTE */
- if (error < 0 && error != -EBUSY)
- return VM_FAULT_SIGBUS | major;
- if (vmf_ret) {
- WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
- return vmf_ret;
+ int copied = PAGE_SIZE;
+
+ if (vmf_ret & VM_FAULT_ERROR)
+ copied = 0;
+ /*
+ * The fault is done by now and there's no way back (another
+ * thread may already be using the PTE we have installed).
+ * Just ignore the error from ->iomap_end since we cannot do much
+ * with it.
+ */
+ ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
}
- return VM_FAULT_NOPAGE | major;
+ return vmf_ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
@@ -1277,16 +1358,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
goto fallback;
/*
- * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
- * PMD or a HZP entry. If it can't (because a 4k page is already in
- * the tree, for instance), it will return -EEXIST and we just fall
- * back to 4k entries.
- */
- entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
- if (IS_ERR(entry))
- goto fallback;
-
- /*
* Note that we don't use iomap_apply here. We aren't doing I/O, only
* setting up a mapping, so really we're using iomap_begin() as a way
* to look up our filesystem block.
@@ -1294,10 +1365,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
pos = (loff_t)pgoff << PAGE_SHIFT;
error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
if (error)
- goto unlock_entry;
+ goto fallback;
+
if (iomap.offset + iomap.length < pos + PMD_SIZE)
goto finish_iomap;
+ /*
+ * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+ * PMD or a HZP entry. If it can't (because a 4k page is already in
+ * the tree, for instance), it will return -EEXIST and we just fall
+ * back to 4k entries.
+ */
+ entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+ if (IS_ERR(entry))
+ goto finish_iomap;
+
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
@@ -1310,7 +1392,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (WARN_ON_ONCE(write))
- goto finish_iomap;
+ goto unlock_entry;
result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
&entry);
break;
@@ -1319,20 +1401,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
break;
}
+ unlock_entry:
+ put_locked_mapping_entry(mapping, pgoff, entry);
finish_iomap:
if (ops->iomap_end) {
- if (result == VM_FAULT_FALLBACK) {
- ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
- &iomap);
- } else {
- error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
- iomap_flags, &iomap);
- if (error)
- result = VM_FAULT_FALLBACK;
- }
+ int copied = PMD_SIZE;
+
+ if (result == VM_FAULT_FALLBACK)
+ copied = 0;
+ /*
+ * The fault is done by now and there's no way back (another
+ * thread may already be using the PMD we have installed).
+ * Just ignore the error from ->iomap_end since we cannot do much
+ * with it.
+ */
+ ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
+ &iomap);
}
- unlock_entry:
- put_locked_mapping_entry(mapping, pgoff, entry);
fallback:
if (result == VM_FAULT_FALLBACK) {
split_huge_pmd(vma, pmd, address);
@@ -1342,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
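
The dax hunks above funnel every error exit through the new dax_fault_return() helper, which maps a negative errno onto a page-fault result code. A minimal user-space sketch of that mapping follows; the VM_FAULT_* constants here are stand-in values for illustration, not taken from kernel headers.

/*
 * User-space sketch of the errno -> VM_FAULT_* mapping centralized by
 * dax_fault_return() above.  The VM_FAULT_* values are assumed for
 * illustration only.
 */
#include <errno.h>
#include <stdio.h>

#define VM_FAULT_NOPAGE 0x0100  /* assumed value, illustration only */
#define VM_FAULT_OOM    0x0001  /* assumed value, illustration only */
#define VM_FAULT_SIGBUS 0x0002  /* assumed value, illustration only */

static int fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;   /* success: PTE already installed */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;      /* allocation failure */
	return VM_FAULT_SIGBUS;           /* anything else: I/O or fs error */
}

int main(void)
{
	printf("0       -> %#x\n", fault_return(0));
	printf("-ENOMEM -> %#x\n", fault_return(-ENOMEM));
	printf("-EIO    -> %#x\n", fault_return(-EIO));
	return 0;
}
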
diff --git a/fs/dcache.c b/fs/dcache.c
index 769903dbc19d..95d71eda8142 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1336,8 +1336,11 @@ int d_set_mounted(struct dentry *dentry)
}
spin_lock(&dentry->d_lock);
if (!d_unlinked(dentry)) {
- dentry->d_flags |= DCACHE_MOUNTED;
- ret = 0;
+ ret = -EBUSY;
+ if (!d_mountpoint(dentry)) {
+ dentry->d_flags |= DCACHE_MOUNTED;
+ ret = 0;
+ }
}
spin_unlock(&dentry->d_lock);
out:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index aeae8c063451..c87bae4376b8 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
struct buffer_head *map_bh)
{
const unsigned blkbits = sdio->blkbits;
+ const unsigned i_blkbits = blkbits + sdio->blkfactor;
int ret = 0;
while (sdio->block_in_file < sdio->final_block_in_request) {
@@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
clean_bdev_aliases(
map_bh->b_bdev,
map_bh->b_blocknr,
- map_bh->b_size >> blkbits);
+ map_bh->b_size >> i_blkbits);
}
if (!sdio->blkfactor)
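
The direct-io fix above is a unit correction: map_bh->b_size is in bytes, and the count passed to clean_bdev_aliases() must be in filesystem blocks rather than the smaller sub-blocks direct-io tracks internally, hence the shift by i_blkbits = blkbits + blkfactor. A small sketch with made-up values shows the difference.

/*
 * Minimal sketch of the unit mismatch fixed above.  The numbers are
 * invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned blkbits   = 9;                    /* 512-byte sub-blocks */
	unsigned blkfactor = 3;                    /* 8 sub-blocks per fs block */
	unsigned i_blkbits = blkbits + blkfactor;  /* 4096-byte fs blocks */
	unsigned long long b_size = 32768;         /* mapping size in bytes */

	/* old, wrong: counts 512-byte units, eight times too many blocks */
	printf("b_size >> blkbits   = %llu\n", b_size >> blkbits);
	/* new, right: counts 4096-byte filesystem blocks */
	printf("b_size >> i_blkbits = %llu\n", b_size >> i_blkbits);
	return 0;
}
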
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 36bea5adcaba..c634874e12d9 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,6 +1,5 @@
config EXT2_FS
tristate "Second extended fs support"
- select FS_IOMAP if FS_DAX
help
Ext2 is a standard Linux file system for hard disks.
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 0093ea2512a8..f073bfca694b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
- } else {
- *new = true;
}
+ *new = true;
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 7b90691e98c4..e38039fd96ff 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,7 +37,6 @@ config EXT4_FS
select CRC16
select CRYPTO
select CRYPTO_CRC32C
- select FS_IOMAP if FS_DAX
help
This is the next generation of the ext3 filesystem.
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index b5f184493c57..d663d3d7c81c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -258,7 +258,6 @@ out:
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int result;
- handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
- handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
- EXT4_DATA_TRANS_BLOCKS(sb));
- } else
- down_read(&EXT4_I(inode)->i_mmap_sem);
-
- if (IS_ERR(handle))
- result = VM_FAULT_SIGBUS;
- else
- result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
-
- if (write) {
- if (!IS_ERR(handle))
- ext4_journal_stop(handle);
- up_read(&EXT4_I(inode)->i_mmap_sem);
+ }
+ down_read(&EXT4_I(inode)->i_mmap_sem);
+ result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
+ up_read(&EXT4_I(inode)->i_mmap_sem);
+ if (write)
sb_end_pagefault(sb);
- } else
- up_read(&EXT4_I(inode)->i_mmap_sem);
return result;
}
@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags)
{
int result;
- handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb;
bool write = flags & FAULT_FLAG_WRITE;
@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) {
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
- handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
- ext4_chunk_trans_blocks(inode,
- PMD_SIZE / PAGE_SIZE));
- } else
- down_read(&EXT4_I(inode)->i_mmap_sem);
-
- if (IS_ERR(handle))
- result = VM_FAULT_SIGBUS;
- else {
- result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
- &ext4_iomap_ops);
}
-
- if (write) {
- if (!IS_ERR(handle))
- ext4_journal_stop(handle);
- up_read(&EXT4_I(inode)->i_mmap_sem);
+ down_read(&EXT4_I(inode)->i_mmap_sem);
+ result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+ &ext4_iomap_ops);
+ up_read(&EXT4_I(inode)->i_mmap_sem);
+ if (write)
sb_end_pagefault(sb);
- } else
- up_read(&EXT4_I(inode)->i_mmap_sem);
return result;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0738f48293cc..0d8802453758 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
}
sector = SECTOR_FROM_BLOCK(blkstart);
- if (sector & (bdev_zone_size(bdev) - 1) ||
- nr_sects != bdev_zone_size(bdev)) {
+ if (sector & (bdev_zone_sectors(bdev) - 1) ||
+ nr_sects != bdev_zone_sectors(bdev)) {
f2fs_msg(sbi->sb, KERN_INFO,
"(%d) %s: Unaligned discard attempted (block %x + %x)",
devi, sbi->s_ndevs ? FDEV(devi).path: "",
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 702638e21c76..46fd30d8af77 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
return 0;
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
- SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+ SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
return -EINVAL;
- sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
__ilog2_u32(sbi->blocks_per_blkz))
return -EINVAL;
sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
sbi->log_blocks_per_blkz;
- if (nr_sectors & (bdev_zone_size(bdev) - 1))
+ if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
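
Both f2fs hunks keep the same alignment test, sector & (bdev_zone_sectors(bdev) - 1), a cheap modulo that only works because the zone length in sectors is a power of two. A user-space sketch of the idiom, with an example zone size chosen for illustration:

#include <stdio.h>

static int is_zone_aligned(unsigned long long sector,
			   unsigned long long zone_sectors)
{
	/* assumes zone_sectors is a power of two */
	return (sector & (zone_sectors - 1)) == 0;
}

int main(void)
{
	unsigned long long zone_sectors = 524288;   /* 256 MiB zone, example */

	printf("%d\n", is_zone_aligned(1048576, zone_sectors));  /* 1: aligned */
	printf("%d\n", is_zone_aligned(1048577, zone_sectors));  /* 0: not aligned */
	return 0;
}
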
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 70ea57c7b6bb..4e06a27ed7f8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
req->out.h.error = -ECONNABORTED;
- clear_bit(FR_PENDING, &req->flags);
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
list_splice_init(&fiq->pending, &to_end2);
+ list_for_each_entry(req, &to_end2, list)
+ clear_bit(FR_PENDING, &req->flags);
while (forget_pending(fiq))
kfree(dequeue_forget(fiq, 1, NULL));
wake_up_all_locked(&fiq->waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1f7c732f32b0..811fd8929a18 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
if (sec || nsec) {
struct timespec64 ts = {
sec,
- max_t(u32, nsec, NSEC_PER_SEC - 1)
+ min_t(u32, nsec, NSEC_PER_SEC - 1)
};
return get_jiffies_64() + timespec64_to_jiffies(&ts);
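
The fuse change above swaps max_t for min_t because the intent is to clamp the nanoseconds field to an upper bound of NSEC_PER_SEC - 1; max() would instead force every value up to at least that bound. A small sketch of the corrected clamp:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000U

/* clamp nsec into [0, NSEC_PER_SEC - 1], as min_t() does in the fix */
static unsigned int clamp_nsec(unsigned int nsec)
{
	return nsec < NSEC_PER_SEC - 1 ? nsec : NSEC_PER_SEC - 1;
}

int main(void)
{
	printf("%u\n", clamp_nsec(500));          /* in range: unchanged */
	printf("%u\n", clamp_nsec(2000000000U));  /* too big: clamped to 999999999 */
	return 0;
}
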
diff --git a/fs/libfs.c b/fs/libfs.c
index e973cd51f126..28d6f35feed6 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
struct inode *root;
struct qstr d_name = QSTR_INIT(name, strlen(name));
- s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
+ s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+ &init_user_ns, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
diff --git a/fs/namespace.c b/fs/namespace.c
index b5b1259e064f..487ba30bb5c6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -742,26 +742,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
return NULL;
}
-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
- struct hlist_head *chain = mp_hash(dentry);
- struct mountpoint *mp;
+ struct mountpoint *mp, *new = NULL;
int ret;
- mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
- if (!mp)
+ if (d_mountpoint(dentry)) {
+mountpoint:
+ read_seqlock_excl(&mount_lock);
+ mp = lookup_mountpoint(dentry);
+ read_sequnlock_excl(&mount_lock);
+ if (mp)
+ goto done;
+ }
+
+ if (!new)
+ new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+ if (!new)
return ERR_PTR(-ENOMEM);
+
+ /* Exactly one process may set d_mounted */
ret = d_set_mounted(dentry);
- if (ret) {
- kfree(mp);
- return ERR_PTR(ret);
- }
- mp->m_dentry = dentry;
- mp->m_count = 1;
- hlist_add_head(&mp->m_hash, chain);
- INIT_HLIST_HEAD(&mp->m_list);
+ /* Someone else set d_mounted? */
+ if (ret == -EBUSY)
+ goto mountpoint;
+
+ /* The dentry is not available as a mountpoint? */
+ mp = ERR_PTR(ret);
+ if (ret)
+ goto done;
+
+ /* Add the new mountpoint to the hash table */
+ read_seqlock_excl(&mount_lock);
+ new->m_dentry = dentry;
+ new->m_count = 1;
+ hlist_add_head(&new->m_hash, mp_hash(dentry));
+ INIT_HLIST_HEAD(&new->m_list);
+ read_sequnlock_excl(&mount_lock);
+
+ mp = new;
+ new = NULL;
+done:
+ kfree(new);
return mp;
}
@@ -1595,11 +1619,11 @@ void __detach_mounts(struct dentry *dentry)
struct mount *mnt;
namespace_lock();
+ lock_mount_hash();
mp = lookup_mountpoint(dentry);
if (IS_ERR_OR_NULL(mp))
goto out_unlock;
- lock_mount_hash();
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@@ -1609,9 +1633,9 @@ void __detach_mounts(struct dentry *dentry)
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
- unlock_mount_hash();
put_mountpoint(mp);
out_unlock:
+ unlock_mount_hash();
namespace_unlock();
}
@@ -2038,9 +2062,7 @@ retry:
namespace_lock();
mnt = lookup_mnt(path);
if (likely(!mnt)) {
- struct mountpoint *mp = lookup_mountpoint(dentry);
- if (!mp)
- mp = new_mountpoint(dentry);
+ struct mountpoint *mp = get_mountpoint(dentry);
if (IS_ERR(mp)) {
namespace_unlock();
inode_unlock(dentry->d_inode);
@@ -2059,7 +2081,11 @@ retry:
static void unlock_mount(struct mountpoint *where)
{
struct dentry *dentry = where->m_dentry;
+
+ read_seqlock_excl(&mount_lock);
put_mountpoint(where);
+ read_sequnlock_excl(&mount_lock);
+
namespace_unlock();
inode_unlock(dentry->d_inode);
}
@@ -3135,9 +3161,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
+ put_mountpoint(root_mp);
unlock_mount_hash();
chroot_fs_refs(&root, &new);
- put_mountpoint(root_mp);
error = 0;
out4:
unlock_mount(old_mp);
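
get_mountpoint() above follows a lookup-or-claim shape: look for an existing mountpoint, otherwise allocate a candidate outside the lock, try to claim the dentry, and on -EBUSY (someone else claimed it first) retry the lookup rather than failing. A rough, single-threaded user-space sketch of that shape, with the data structures invented for illustration:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct slot {
	atomic_int claimed;      /* stands in for DCACHE_MOUNTED */
	void *record;            /* stands in for the hashed mountpoint */
};

static int try_claim(struct slot *s)
{
	int expected = 0;
	/* exactly one caller wins; everyone else gets -EBUSY */
	return atomic_compare_exchange_strong(&s->claimed, &expected, 1)
		? 0 : -EBUSY;
}

static void *get_record(struct slot *s)
{
	void *new = NULL;

	for (;;) {
		if (s->record)                 /* lookup path */
			break;
		if (!new)
			new = malloc(64);      /* allocate before claiming */
		if (!new)
			return NULL;
		if (try_claim(s) == -EBUSY)    /* lost the race: retry lookup */
			continue;
		s->record = new;               /* we won: publish the record */
		new = NULL;
		break;
	}
	free(new);                             /* unused candidate, if any */
	return s->record;
}

int main(void)
{
	struct slot s = { .claimed = 0, .record = NULL };

	printf("%p\n", get_record(&s));
	printf("%p\n", get_record(&s));        /* second call hits the lookup */
	return 0;
}
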
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6dcbc5defb7a..0a0eaecf9676 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
@@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}
-static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
+static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
+ unsigned long timestamp)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
NFS_INO_INVALID_ACL;
}
dir->i_version = cinfo->after;
+ nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfs_fscache_invalidate(dir);
spin_unlock(&dir->i_lock);
@@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_fattr_map_and_free_names(server, &data->f_attr);
if (o_arg->open_flags & O_CREAT) {
- update_changeattr(dir, &o_res->cinfo);
if (o_arg->open_flags & O_EXCL)
data->file_created = 1;
else if (o_res->cinfo.before != o_res->cinfo.after)
data->file_created = 1;
+ if (data->file_created || dir->i_version != o_res->cinfo.after)
+ update_changeattr(dir, &o_res->cinfo,
+ o_res->f_attr->time_start);
}
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -2697,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
sattr->ia_valid |= ATTR_MTIME;
/* Except MODE, it seems harmless of setting twice. */
- if ((attrset[1] & FATTR4_WORD1_MODE))
+ if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
+ attrset[1] & FATTR4_WORD1_MODE)
sattr->ia_valid &= ~ATTR_MODE;
if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -4073,11 +4077,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
.rpc_argp = &args,
.rpc_resp = &res,
};
+ unsigned long timestamp = jiffies;
int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0)
- update_changeattr(dir, &res.cinfo);
+ update_changeattr(dir, &res.cinfo, timestamp);
return status;
}
@@ -4125,7 +4130,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs4_async_handle_error(task, res->server, NULL,
&data->timeout) == -EAGAIN)
return 0;
- update_changeattr(dir, &res->cinfo);
+ if (task->tk_status == 0)
+ update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
return 1;
}
@@ -4159,8 +4165,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
return 0;
- update_changeattr(old_dir, &res->old_cinfo);
- update_changeattr(new_dir, &res->new_cinfo);
+ if (task->tk_status == 0) {
+ update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
+ if (new_dir != old_dir)
+ update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
+ }
return 1;
}
@@ -4197,7 +4206,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
- update_changeattr(dir, &res.cinfo);
+ update_changeattr(dir, &res.cinfo, res.fattr->time_start);
status = nfs_post_op_update_inode(inode, res.fattr);
if (!status)
nfs_setsecurity(inode, res.fattr, res.label);
@@ -4272,7 +4281,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
- update_changeattr(dir, &data->res.dir_cinfo);
+ update_changeattr(dir, &data->res.dir_cinfo,
+ data->res.fattr->time_start);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
}
return status;
@@ -6127,7 +6137,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
- get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
@@ -6240,7 +6249,6 @@ static void nfs4_lock_release(void *calldata)
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
- fput(data->fl.fl_file);
kfree(data);
dprintk("%s: done!\n", __func__);
}
@@ -8483,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
goto out;
}
+ nfs4_sequence_free_slot(&lgp->res.seq_res);
err = nfs4_handle_exception(server, nfs4err, exception);
if (!status) {
if (exception->retry)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1d152f4470cd..daeb94e3acd4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
case -NFS4ERR_BADXDR:
case -NFS4ERR_RESOURCE:
case -NFS4ERR_NOFILEHANDLE:
+ case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */
return;
};
@@ -1729,7 +1730,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
break;
case -NFS4ERR_STALE_CLIENTID:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 59554f3adf29..dd042498ce7c 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino)
send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
spin_unlock(&ino->i_lock);
- pnfs_free_lseg_list(&tmp_list);
if (send)
status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
+ pnfs_free_lseg_list(&tmp_list);
pnfs_put_layout_hdr(lo);
out:
dprintk("<-- %s status: %d\n", __func__, status);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 7ecf16be4a44..8fae53ce21d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
p++; /* to be backfilled later */
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
- u32 *supp = nfsd_suppattrs[minorversion];
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
if (!IS_POSIXACL(dentry->d_inode))
supp[0] &= ~FATTR4_WORD0_ACL;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d3fea0bd89e2..6043306e8e21 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -510,18 +510,6 @@ void fsnotify_detach_group_marks(struct fsnotify_group *group)
}
}
-void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
-{
- assert_spin_locked(&old->lock);
- new->inode = old->inode;
- new->mnt = old->mnt;
- if (old->group)
- fsnotify_get_group(old->group);
- new->group = old->group;
- new->mask = old->mask;
- new->free_mark = old->free_mark;
-}
-
/*
* Nothing fancy, just initialize lists and locks and counters.
*/
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 83d576f6a287..77d1632e905d 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
+ /*
+ * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb: it always
+ * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
+ * we can recover correctly from node failure. Otherwise, we may get
+ * an invalid LVB in the LKB, but without DLM_SBF_VALNOTVALID being set.
+ */
+ if (!ocfs2_is_o2cb_active() &&
+ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lvb = 1;
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 52c07346bea3..820359096c7a 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;
+inline int ocfs2_is_o2cb_active(void)
+{
+ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index f2dce10fae54..e3036e1790e8 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
extern struct kset *ocfs2_kset;
#endif /* STACKGLUE_H */
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9ad48d9202a9..023bb0b03352 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -154,29 +154,38 @@ out_err:
static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
struct dentry **ret)
{
- const char *s = d->name.name;
+ /* Counting down from the end, since the prefix can change */
+ size_t rem = d->name.len - 1;
struct dentry *dentry = NULL;
int err;
- if (*s != '/')
+ if (d->name.name[0] != '/')
return ovl_lookup_single(base, d, d->name.name, d->name.len,
0, "", ret);
- while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+ while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+ const char *s = d->name.name + d->name.len - rem;
const char *next = strchrnul(s, '/');
- size_t slen = strlen(s);
+ size_t thislen = next - s;
+ bool end = !next[0];
- if (WARN_ON(slen > d->name.len) ||
- WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+ /* Verify we did not go off the rails */
+ if (WARN_ON(s[-1] != '/'))
return -EIO;
- err = ovl_lookup_single(base, d, s, next - s,
- d->name.len - slen, next, &base);
+ err = ovl_lookup_single(base, d, s, thislen,
+ d->name.len - rem, next, &base);
dput(dentry);
if (err)
return err;
dentry = base;
- s = next;
+ if (end)
+ break;
+
+ rem -= thislen + 1;
+
+ if (WARN_ON(rem >= d->name.len))
+ return -EIO;
}
*ret = dentry;
return 0;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 595522022aca..c9d48dc78495 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int error;
if (type == ACL_TYPE_ACCESS) {
- error = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (error < 0)
- return 0;
- if (error == 0)
- acl = NULL;
+ error = posix_acl_update_mode(inode,
+ &inode->i_mode, &acl);
+ if (error)
+ return error;
}
inode->i_ctime = current_time(inode);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e7e61b28f31..87c9a9aacda3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF];
int len;
+
+ cond_resched();
if (!has_pid_permissions(ns, iter.task, 2))
continue;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 55313d994895..d4e37acd4821 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -709,7 +709,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
ctl_dir = container_of(head, struct ctl_dir, header);
if (!dir_emit_dots(file, ctx))
- return 0;
+ goto out;
pos = 2;
@@ -719,6 +719,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
break;
}
}
+out:
sysctl_head_finish(head);
return 0;
}
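
The proc_sys_readdir() change above is the usual single-exit cleanup pattern: the early return when dir_emit_dots() fails must still pass through sysctl_head_finish(), so the bare return becomes goto out. A generic user-space sketch of the idiom:

#include <stdio.h>
#include <stdlib.h>

static int use_resource(int bail_early)
{
	int ret = 0;
	char *res = malloc(32);        /* stands in for grabbing the head */

	if (!res)
		return -1;

	if (bail_early)                /* was a bare "return 0;" before the fix */
		goto out;

	/* ... main body that keeps using res ... */
	res[0] = 'x';

out:
	free(res);                     /* stands in for sysctl_head_finish() */
	return ret;
}

int main(void)
{
	printf("%d %d\n", use_resource(0), use_resource(1));
	return 0;
}
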
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38dfafa..0186fe6d39f3 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/major.h>
#include "internal.h"
static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ u64 id = 0;
+
+ /* When calling huge_encode_dev(),
+ * use sb->s_bdev->bd_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK defined
+ * use sb->s_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD defined
+ * leave id as 0 when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD undefined
+ */
+ if (sb->s_bdev)
+ id = huge_encode_dev(sb->s_bdev->bd_dev);
+ else if (sb->s_dev)
+ id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops;
+#ifdef CONFIG_ROMFS_ON_MTD
+ /* Use same dev ID from the underlying mtdblock device */
+ if (sb->s_mtd)
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
/* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL);
if (!rsb)
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 0a908ae7af13..b0d0623c83ed 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
config UBIFS_FS_ENCRYPTION
bool "UBIFS Encryption"
- depends on UBIFS_FS
+ depends on UBIFS_FS && BLOCK
select FS_ENCRYPTION
default n
help
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 1c5331ac9614..528369f3e472 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
dentry, mode, dir->i_ino);
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- return err;
-
- if (!fscrypt_has_encryption_key(dir)) {
- return -EPERM;
- }
- }
-
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
ubifs_assert(inode_is_locked(dir));
ubifs_assert(inode_is_locked(inode));
- if (ubifs_crypt_is_encrypted(dir)) {
- if (!fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
-
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return err;
-
- if (!fscrypt_has_encryption_key(inode))
- return -EPERM;
- }
+ if (ubifs_crypt_is_encrypted(dir) &&
+ !fscrypt_has_permitted_context(dir, inode))
+ return -EPERM;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (err)
return err;
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- goto out_budg;
-
- if (!fscrypt_has_encryption_key(dir)) {
- err = -EPERM;
- goto out_budg;
- }
- }
-
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
return err;
}
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- goto out_budg;
-
- if (!fscrypt_has_encryption_key(dir)) {
- err = -EPERM;
- goto out_budg;
- }
- }
-
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
goto out_inode;
}
- err = fscrypt_get_encryption_info(inode);
- if (err) {
- kfree(sd);
- goto out_inode;
- }
-
- if (!fscrypt_has_encryption_key(inode)) {
- kfree(sd);
- err = -EPERM;
- goto out_inode;
- }
-
ostr.name = sd->encrypted_path;
ostr.len = disk_link.len;
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 78d713644df3..da519ba205f6 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index a459211a1c21..294519b98874 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
} else {
data->compr_size = 0;
+ out_len = compr_len;
}
dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
dn->compr_type = cpu_to_le16(compr_type);
dn->size = cpu_to_le32(*new_len);
*new_len = UBIFS_DATA_NODE_SZ + out_len;
+ err = 0;
out:
kfree(buf);
return err;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 74ae2de949df..709aa098dd46 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -34,6 +34,11 @@
#include <linux/slab.h>
#include "ubifs.h"
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+ int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_zbranch *zbr, void *node);
+
/*
* Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
* @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
return 0;
}
- err = ubifs_tnc_read_node(c, zbr, node);
+ if (c->replaying) {
+ err = fallible_read_node(c, &zbr->key, zbr, node);
+ /*
+ * fallible_read_node() returns 0 if the node is missing and 1 if it
+ * was read; map those to -ENOENT and 0. Negative codes stay as-is.
+ */
+ if (err == 0)
+ err = -ENOENT;
+ else if (err == 1)
+ err = 0;
+ } else {
+ err = ubifs_tnc_read_node(c, zbr, node);
+ }
if (err)
return err;
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
if (fname_len(nm) > 0) {
if (err) {
/* Handle collisions */
- err = resolve_collision(c, key, &znode, &n, nm);
+ if (c->replaying)
+ err = fallible_resolve_collision(c, key, &znode, &n,
+ nm, 0);
+ else
+ err = resolve_collision(c, key, &znode, &n, nm);
dbg_tnc("rc returned %d, znode %p, n %d",
err, znode, n);
if (unlikely(err < 0))
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d96e2f30084b..43953e03c356 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
struct uffd_msg msg;
wait_queue_t wq;
struct userfaultfd_ctx *ctx;
+ bool waken;
};
struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
if (len && (start > uwq->msg.arg.pagefault.address ||
start + len <= uwq->msg.arg.pagefault.address))
goto out;
+ WRITE_ONCE(uwq->waken, true);
+ /*
+ * The implicit smp_mb__before_spinlock in try_to_wake_up()
+ * renders uwq->waken visible to other CPUs before the task is
+ * woken.
+ */
ret = wake_up_state(wq->private, mode);
if (ret)
/*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
struct userfaultfd_wait_queue uwq;
int ret;
bool must_wait, return_to_userland;
+ long blocking_state;
BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
uwq.wq.private = current;
uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
uwq.ctx = ctx;
+ uwq.waken = false;
return_to_userland =
(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+ blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+ TASK_KILLABLE;
spin_lock(&ctx->fault_pending_wqh.lock);
/*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
* following the spin_unlock to happen before the list_add in
* __add_wait_queue.
*/
- set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
- TASK_KILLABLE);
+ set_current_state(blocking_state);
spin_unlock(&ctx->fault_pending_wqh.lock);
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
+
+ /*
+ * False wakeups can originate even from rwsem before
+ * up_read() however userfaults will wait either for a
+ * targeted wakeup on the specific uwq waitqueue from
+ * wake_userfault() or for signals or for uffd
+ * release.
+ */
+ while (!READ_ONCE(uwq.waken)) {
+ /*
+ * This needs the full smp_store_mb()
+ * guarantee as the state write must be
+ * visible to other CPUs before we read the
+ * uwq.waken value written by the waking CPU.
+ */
+ set_current_state(blocking_state);
+ if (READ_ONCE(uwq.waken) ||
+ READ_ONCE(ctx->released) ||
+ (return_to_userland ? signal_pending(current) :
+ fatal_signal_pending(current)))
+ break;
+ schedule();
+ }
}
__set_current_state(TASK_RUNNING);
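
The loop added to handle_userfault() treats every wakeup as potentially spurious: after each schedule() it re-checks uwq.waken and the exit conditions before going back to sleep. A stripped-down user-space analogue of that re-check loop, with the kernel's sleep/wake machinery replaced by a flag poll plus a short nanosleep:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_bool waken;      /* set by the waker, read by the waiter */
static atomic_bool released;   /* stands in for ctx->released / a fatal signal */

static void wait_for_wake(void)
{
	/* keep sleeping until a targeted wakeup or an exit condition */
	while (!atomic_load(&waken)) {
		if (atomic_load(&released))
			break;
		/* in the kernel: set_current_state() + schedule() */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	atomic_store(&waken, true);    /* pretend the fault was already resolved */
	wait_for_wake();
	puts("woken");
	return 0;
}
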
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e5ebc3770460..33db69be4832 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
#include "xfs_rmap_btree.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
/*
* Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
struct xfs_mount *mp = pag->pag_mount;
struct xfs_ag_resv *resv;
int error;
+ xfs_extlen_t reserved;
- resv = xfs_perag_resv(pag, type);
if (used > ask)
ask = used;
- resv->ar_asked = ask;
- resv->ar_reserved = resv->ar_orig_reserved = ask - used;
- mp->m_ag_max_usable -= ask;
+ reserved = ask - used;
- trace_xfs_ag_resv_init(pag, type, ask);
-
- error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
- if (error)
+ error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+ if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
+ xfs_warn(mp,
+"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
+ pag->pag_agno);
+ return error;
+ }
- return error;
+ mp->m_ag_max_usable -= ask;
+
+ resv = xfs_perag_resv(pag, type);
+ resv->ar_asked = ask;
+ resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+ trace_xfs_ag_resv_init(pag, type, ask);
+ return 0;
}
/* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
xfs_ag_resv_init(
struct xfs_perag *pag)
{
+ struct xfs_mount *mp = pag->pag_mount;
+ xfs_agnumber_t agno = pag->pag_agno;
xfs_extlen_t ask;
xfs_extlen_t used;
int error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(pag->pag_mount,
- pag->pag_agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
- error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
- ask, used);
+ error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+ ask, used);
+ if (error) {
+ /*
+ * Because we didn't have per-AG reservations when the
+ * finobt feature was added we might not be able to
+ * reserve all needed blocks. Warn and fall back to the
+ * old and potentially buggy code in that case, but
+ * ensure we do have the reservation for the refcountbt.
+ */
+ ask = used = 0;
+
+ mp->m_inotbt_nores = true;
+
+ error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+ &used);
+ if (error)
+ goto out;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+ ask, used);
+ if (error)
+ goto out;
+ }
}
/* Create the AGFL metadata reservation */
if (pag->pag_agfl_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
- &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
@@ -256,6 +289,16 @@ xfs_ag_resv_init(
goto out;
}
+#ifdef DEBUG
+ /* need to read in the AGF for the ASSERT below to work */
+ error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+ if (error)
+ return error;
+
+ ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+ xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
+ pag->pagf_freeblks + pag->pagf_flcount);
+#endif
out:
return error;
}
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 5050056a0b06..9f06a211e157 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -95,10 +95,7 @@ unsigned int
xfs_alloc_set_aside(
struct xfs_mount *mp)
{
- unsigned int blocks;
-
- blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
- return blocks;
+ return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
/*
@@ -365,36 +362,12 @@ xfs_alloc_fix_len(
return;
ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
ASSERT(rlen % args->prod == args->mod);
+ ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
+ rlen + args->minleft);
args->len = rlen;
}
/*
- * Fix up length if there is too little space left in the a.g.
- * Return 1 if ok, 0 if too little, should give up.
- */
-STATIC int
-xfs_alloc_fix_minleft(
- xfs_alloc_arg_t *args) /* allocation argument structure */
-{
- xfs_agf_t *agf; /* a.g. freelist header */
- int diff; /* free space difference */
-
- if (args->minleft == 0)
- return 1;
- agf = XFS_BUF_TO_AGF(args->agbp);
- diff = be32_to_cpu(agf->agf_freeblks)
- - args->len - args->minleft;
- if (diff >= 0)
- return 1;
- args->len += diff; /* shrink the allocated space */
- /* casts to (int) catch length underflows */
- if ((int)args->len >= (int)args->minlen)
- return 1;
- args->agbno = NULLAGBLOCK;
- return 0;
-}
-
-/*
* Update the two btrees, logically removing from freespace the extent
* starting at rbno, rlen blocks. The extent is contained within the
* actual (current) free extent fbno for flen blocks.
@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
xfs_alloc_arg_t *args) /* argument structure for allocation */
{
int error=0;
- xfs_extlen_t reservation;
- xfs_extlen_t oldmax;
ASSERT(args->minlen > 0);
ASSERT(args->maxlen > 0);
@@ -699,20 +670,6 @@ xfs_alloc_ag_vextent(
ASSERT(args->alignment > 0);
/*
- * Clamp maxlen to the amount of free space minus any reservations
- * that have been made.
- */
- oldmax = args->maxlen;
- reservation = xfs_ag_resv_needed(args->pag, args->resv);
- if (args->maxlen > args->pag->pagf_freeblks - reservation)
- args->maxlen = args->pag->pagf_freeblks - reservation;
- if (args->maxlen == 0) {
- args->agbno = NULLAGBLOCK;
- args->maxlen = oldmax;
- return 0;
- }
-
- /*
* Branch to correct routine based on the type.
*/
args->wasfromfl = 0;
@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
/* NOTREACHED */
}
- args->maxlen = oldmax;
-
if (error || args->agbno == NULLAGBLOCK)
return error;
@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
- args->agbno;
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args))
- goto not_found;
-
ASSERT(args->agbno + args->len <= tend);
/*
@@ -1149,12 +1101,7 @@ restart:
XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
- if (!xfs_alloc_fix_minleft(args)) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- trace_xfs_alloc_near_nominleft(args);
- return 0;
- }
- blen = args->len;
+
/*
* We are allocating starting at bnew for blen blocks.
*/
@@ -1346,12 +1293,6 @@ restart:
*/
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args)) {
- trace_xfs_alloc_near_nominleft(args);
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- return 0;
- }
rlen = args->len;
(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
args->datatype, ltbnoa, ltlena, &ltnew);
@@ -1553,8 +1494,6 @@ restart:
}
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args))
- goto out_nominleft;
rlen = args->len;
XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
/*
@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
int flags)
{
struct xfs_perag *pag = args->pag;
- xfs_extlen_t longest;
+ xfs_extlen_t alloc_len, longest;
xfs_extlen_t reservation; /* blocks that are still reserved */
int available;
@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
reservation = xfs_ag_resv_needed(pag, args->resv);
/* do we have enough contiguous free space for the allocation? */
+ alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
reservation);
- if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
+ if (longest < alloc_len)
return false;
/* do we have enough free space remaining for the allocation? */
available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
- reservation - min_free - args->total);
- if (available < (int)args->minleft || available <= 0)
+ reservation - min_free - args->minleft);
+ if (available < (int)max(args->total, alloc_len))
return false;
+ /*
+ * Clamp maxlen to the amount of free space available for the actual
+ * extent allocation.
+ */
+ if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
+ args->maxlen = available;
+ ASSERT(args->maxlen > 0);
+ ASSERT(args->maxlen >= args->minlen);
+ }
+
return true;
}
@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
}
need = xfs_alloc_min_freelist(mp, pag);
- if (!xfs_alloc_space_available(args, need, flags))
+ if (!xfs_alloc_space_available(args, need, flags |
+ XFS_ALLOC_FLAG_CHECK))
goto out_agbp_relse;
/*
@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
xfs_agblock_t agsize; /* allocation group size */
int error;
int flags; /* XFS_ALLOC_FLAG_... locking flags */
- xfs_extlen_t minleft;/* minimum left value, temp copy */
xfs_mount_t *mp; /* mount structure pointer */
xfs_agnumber_t sagno; /* starting allocation group number */
xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
- int no_min = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
trace_xfs_alloc_vextent_badargs(args);
return 0;
}
- minleft = args->minleft;
switch (type) {
case XFS_ALLOCTYPE_THIS_AG:
@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
args->pag = xfs_perag_get(mp, args->agno);
- args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
- args->minleft = minleft;
if (error) {
trace_xfs_alloc_vextent_nofix(args);
goto error0;
@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
*/
for (;;) {
args->pag = xfs_perag_get(mp, args->agno);
- if (no_min) args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
- args->minleft = minleft;
if (error) {
trace_xfs_alloc_vextent_nofix(args);
goto error0;
@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
* or switch to non-trylock mode.
*/
if (args->agno == sagno) {
- if (no_min == 1) {
+ if (flags == 0) {
args->agbno = NULLAGBLOCK;
trace_xfs_alloc_vextent_allfailed(args);
break;
}
- if (flags == 0) {
- no_min = 1;
- } else {
- flags = 0;
- if (type == XFS_ALLOCTYPE_START_BNO) {
- args->agbno = XFS_FSB_TO_AGBNO(mp,
- args->fsbno);
- args->type = XFS_ALLOCTYPE_NEAR_BNO;
- }
+
+ flags = 0;
+ if (type == XFS_ALLOCTYPE_START_BNO) {
+ args->agbno = XFS_FSB_TO_AGBNO(mp,
+ args->fsbno);
+ args->type = XFS_ALLOCTYPE_NEAR_BNO;
}
}
xfs_perag_put(args->pag);
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 7c404a6b0ae3..1d0f48a501a3 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
-
+#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
/*
* Argument structure for xfs_alloc routines.
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index af1ecb19121e..6622d46ddec3 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -131,9 +131,6 @@ xfs_attr_get(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- if (!xfs_inode_hasattr(ip))
- return -ENOATTR;
-
error = xfs_attr_args_init(&args, ip, name, flags);
if (error)
return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
- if (!xfs_inode_hasattr(dp))
- return -ENOATTR;
-
error = xfs_attr_args_init(&args, dp, name, flags);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760bc3b2536..bfc00de5c6f1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
align = xfs_get_cowextsz_hint(ap->ip);
else if (xfs_alloc_is_userdata(ap->datatype))
align = xfs_get_extsz_hint(ap->ip);
- if (unlikely(align)) {
+ if (align) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 0, ap->eof, 0, ap->conv,
&ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
args.minlen = ap->minlen;
}
/* apply extent size hints if obtained earlier */
- if (unlikely(align)) {
+ if (align) {
args.prod = align;
if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -3812,7 +3812,6 @@ xfs_bmap_btalloc(
args.fsbno = 0;
args.type = XFS_ALLOCTYPE_FIRST_AG;
args.total = ap->minlen;
- args.minleft = 0;
if ((error = xfs_alloc_vextent(&args)))
return error;
ap->dfops->dop_low = true;
@@ -4344,8 +4343,6 @@ xfs_bmapi_allocate(
if (error)
return error;
- if (bma->dfops->dop_low)
- bma->minleft = 0;
if (bma->cur)
bma->cur->bc_private.b.firstblock = *bma->firstblock;
if (bma->blkno == NULLFSBLOCK)
@@ -4517,8 +4514,6 @@ xfs_bmapi_write(
int n; /* current extent index */
xfs_fileoff_t obno; /* old block number (offset) */
int whichfork; /* data or attr fork */
- char inhole; /* current location is hole in file */
- char wasdelay; /* old extent was delayed */
#ifdef DEBUG
xfs_fileoff_t orig_bno; /* original block number value */
@@ -4606,22 +4601,44 @@ xfs_bmapi_write(
bma.firstblock = firstblock;
while (bno < end && n < *nmap) {
- inhole = eof || bma.got.br_startoff > bno;
- wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+ bool need_alloc = false, wasdelay = false;
- /*
- * Make sure we only reflink into a hole.
- */
- if (flags & XFS_BMAPI_REMAP)
- ASSERT(inhole);
- if (flags & XFS_BMAPI_COWFORK)
- ASSERT(!inhole);
+ /* in hole or beyond EOF? */
+ if (eof || bma.got.br_startoff > bno) {
+ if (flags & XFS_BMAPI_DELALLOC) {
+ /*
+ * For the COW fork we can reasonably get a
+ * request for converting an extent that races
+ * with other threads already having converted
+ * part of it, because converting COW
+ * extents to regular blocks is not
+ * protected by the IOLOCK.
+ */
+ ASSERT(flags & XFS_BMAPI_COWFORK);
+ if (!(flags & XFS_BMAPI_COWFORK)) {
+ error = -EIO;
+ goto error0;
+ }
+
+ if (eof || bno >= end)
+ break;
+ } else {
+ need_alloc = true;
+ }
+ } else {
+ /*
+ * Make sure we only reflink into a hole.
+ */
+ ASSERT(!(flags & XFS_BMAPI_REMAP));
+ if (isnullstartblock(bma.got.br_startblock))
+ wasdelay = true;
+ }
/*
* First, deal with the hole before the allocated space
* that we found, if any.
*/
- if (inhole || wasdelay) {
+ if (need_alloc || wasdelay) {
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index cecd094404cc..cdef87db5262 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
/* Map something in the CoW fork. */
#define XFS_BMAPI_COWFORK 0x200
+/* Only convert delalloc space, don't allocate entirely new extents */
+#define XFS_BMAPI_DELALLOC 0x400
+
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_METADATA, "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
{ XFS_BMAPI_CONVERT, "CONVERT" }, \
{ XFS_BMAPI_ZERO, "ZERO" }, \
{ XFS_BMAPI_REMAP, "REMAP" }, \
- { XFS_BMAPI_COWFORK, "COWFORK" }
+ { XFS_BMAPI_COWFORK, "COWFORK" }, \
+ { XFS_BMAPI_DELALLOC, "DELALLOC" }
static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index d6330c297ca0..d9be241fc86f 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -502,12 +502,11 @@ try_another_ag:
if (args.fsbno == NULLFSBLOCK && args.minleft) {
/*
* Could not find an AG with enough free space to satisfy
- * a full btree split. Try again without minleft and if
+ * a full btree split. Try again and if
* successful activate the lowspace algorithm.
*/
args.fsbno = 0;
args.type = XFS_ALLOCTYPE_FIRST_AG;
- args.minleft = 0;
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index c58d72c220f5..2f389d366e93 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -36,21 +36,29 @@
struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
/*
- * @mode, if set, indicates that the type field needs to be set up.
- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
- * for file type specification. This will be propagated into the directory
- * structure if appropriate for the given operation and filesystem config.
+ * Convert inode mode to directory entry filetype
*/
-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
- [0] = XFS_DIR3_FT_UNKNOWN,
- [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
- [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
-};
+unsigned char xfs_mode_to_ftype(int mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ return XFS_DIR3_FT_REG_FILE;
+ case S_IFDIR:
+ return XFS_DIR3_FT_DIR;
+ case S_IFCHR:
+ return XFS_DIR3_FT_CHRDEV;
+ case S_IFBLK:
+ return XFS_DIR3_FT_BLKDEV;
+ case S_IFIFO:
+ return XFS_DIR3_FT_FIFO;
+ case S_IFSOCK:
+ return XFS_DIR3_FT_SOCK;
+ case S_IFLNK:
+ return XFS_DIR3_FT_SYMLINK;
+ default:
+ return XFS_DIR3_FT_UNKNOWN;
+ }
+}
/*
* ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
- ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
+ if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+ return -EFSCORRUPTED;
*vp = rval;
return 0;
}
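
xfs_mode_to_ftype() now dispatches on mode & S_IFMT in a switch instead of indexing a fixed-size table, so garbage mode bits can no longer index past the table; the verifier change in xfs_inode_buf.c further below relies on the XFS_DIR3_FT_UNKNOWN fallback. The same dispatch in plain user space, with the FT_* values invented for illustration:

#include <stdio.h>
#include <sys/stat.h>

enum { FT_UNKNOWN, FT_REG, FT_DIR, FT_SYMLINK };   /* illustrative values */

static int mode_to_ftype(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:  return FT_REG;
	case S_IFDIR:  return FT_DIR;
	case S_IFLNK:  return FT_SYMLINK;
	default:       return FT_UNKNOWN;   /* unknown or garbage mode bits */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       mode_to_ftype(S_IFREG | 0644),   /* FT_REG */
	       mode_to_ftype(S_IFDIR | 0755),   /* FT_DIR */
	       mode_to_ftype(0));               /* no type bits -> FT_UNKNOWN */
	return 0;
}
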
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 0197590fa7d7..d6e6d9d16f6c 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,9 @@
#ifndef __XFS_DIR2_H__
#define __XFS_DIR2_H__
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+
struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
extern struct xfs_name xfs_name_dotdot;
/*
- * directory filetype conversion tables.
+ * Convert inode mode to directory entry filetype
*/
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
+extern unsigned char xfs_mode_to_ftype(int mode);
/*
* directory operations vector for encode/decode routines
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 0fd086d03d41..7c471881c9a6 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
}
STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *start,
union xfs_btree_ptr *new,
- int *stat)
+ int *stat,
+ enum xfs_ag_resv_type resv)
{
xfs_alloc_arg_t args; /* block allocation args */
int error; /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
args.maxlen = 1;
args.prod = 1;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ args.resv = resv;
error = xfs_alloc_vextent(&args);
if (error) {
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block(
}
STATIC int
+xfs_inobt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ return __xfs_inobt_alloc_block(cur, start, new, stat,
+ XFS_AG_RESV_METADATA);
+}
+
+STATIC int
xfs_inobt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
- .alloc_block = xfs_inobt_alloc_block,
+ .alloc_block = xfs_finobt_alloc_block,
.free_block = xfs_inobt_free_block,
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
return 0;
}
#endif /* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+ struct xfs_mount *mp)
+{
+ /* Bail out if we're uninitialized, which can happen in mkfs. */
+ if (mp->m_inobt_mxr[0] == 0)
+ return 0;
+
+ return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+ (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+ XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum,
+ xfs_extlen_t *tree_blocks)
+{
+ struct xfs_buf *agbp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+ if (error)
+ return error;
+
+ cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+ error = xfs_btree_count_blocks(cur, tree_blocks);
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_buf_relse(agbp);
+
+ return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_extlen_t *ask,
+ xfs_extlen_t *used)
+{
+ xfs_extlen_t tree_len = 0;
+ int error;
+
+ if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+ return 0;
+
+ error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+ if (error)
+ return error;
+
+ *ask += xfs_inobt_max_size(mp);
+ *used += tree_len;
+ return 0;
+}
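
xfs_finobt_calc_reserves() follows the same ask/used contract as the refcount and rmap btree helpers further down: *ask accumulates a worst-case btree size, *used accumulates the blocks currently in the tree, and the per-AG reservation code holds back the difference. A toy model of that accounting (names and numbers invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Toy version of the per-AG reservation contract: each btree adds its
     * worst-case size to *ask and its current footprint to *used; the
     * caller then holds back (ask - used) blocks from normal allocation.
     */
    static int calc_reserves(uint64_t max_tree_blocks, uint64_t tree_blocks,
                             uint64_t *ask, uint64_t *used)
    {
        *ask  += max_tree_blocks;
        *used += tree_blocks;
        return 0;
    }

    int main(void)
    {
        uint64_t ask = 0, used = 0;

        calc_reserves(1024, 37, &ask, &used);   /* e.g. the finobt        */
        calc_reserves(2048, 512, &ask, &used);  /* e.g. another AG btree  */
        printf("reserve %llu more blocks\n",
               (unsigned long long)(ask - used));
        return 0;
    }
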
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453217ce..aa81e2e63f3f 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_extlen_t *ask, xfs_extlen_t *used);
+
#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index dd483e2767f7..d93f9d918cfc 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -29,6 +29,7 @@
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
+#include "xfs_dir2.h"
/*
* Check that none of the inodes in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
xfs_ino_t ino,
struct xfs_dinode *dip)
{
+ uint16_t mode;
uint16_t flags;
uint64_t flags2;
@@ -396,8 +398,12 @@ xfs_dinode_verify(
if (be64_to_cpu(dip->di_size) & (1ULL << 63))
return false;
- /* No zero-length symlinks. */
- if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+ mode = be16_to_cpu(dip->di_mode);
+ if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
+ return false;
+
+ /* No zero-length symlinks/dirs. */
+ if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
return false;
/* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 6fb2215f8ff7..50add5272807 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -409,13 +409,14 @@ xfs_refcountbt_calc_size(
*/
xfs_extlen_t
xfs_refcountbt_max_size(
- struct xfs_mount *mp)
+ struct xfs_mount *mp,
+ xfs_agblock_t agblocks)
{
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (mp->m_refc_mxr[0] == 0)
return 0;
- return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks);
+ return xfs_refcountbt_calc_size(mp, agblocks);
}
/*
@@ -430,22 +431,24 @@ xfs_refcountbt_calc_reserves(
{
struct xfs_buf *agbp;
struct xfs_agf *agf;
+ xfs_agblock_t agblocks;
xfs_extlen_t tree_len;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- *ask += xfs_refcountbt_max_size(mp);
error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
+ agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
xfs_buf_relse(agbp);
+ *ask += xfs_refcountbt_max_size(mp, agblocks);
*used += tree_len;
return error;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index 3be7768bd51a..9db008b955b7 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
-extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp);
+extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
+ xfs_agblock_t agblocks);
extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index de25771764ba..74e5a54bc428 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -550,13 +550,14 @@ xfs_rmapbt_calc_size(
*/
xfs_extlen_t
xfs_rmapbt_max_size(
- struct xfs_mount *mp)
+ struct xfs_mount *mp,
+ xfs_agblock_t agblocks)
{
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (mp->m_rmap_mxr[0] == 0)
return 0;
- return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks);
+ return xfs_rmapbt_calc_size(mp, agblocks);
}
/*
@@ -571,25 +572,24 @@ xfs_rmapbt_calc_reserves(
{
struct xfs_buf *agbp;
struct xfs_agf *agf;
- xfs_extlen_t pool_len;
+ xfs_agblock_t agblocks;
xfs_extlen_t tree_len;
int error;
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- /* Reserve 1% of the AG or enough for 1 block per record. */
- pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp));
- *ask += pool_len;
-
error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
+ agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
xfs_buf_relse(agbp);
+ /* Reserve 1% of the AG or enough for 1 block per record. */
+ *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
*used += tree_len;
return error;
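
The reworked rmap reservation above sizes the pool from the AG length recorded in the AGF rather than sb_agblocks, so a short (runt) last AG no longer over-reserves, and the ask is the larger of 1% of the AG or the worst-case btree size. With made-up numbers, the arithmetic looks like this:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        /* Illustrative numbers only: a 262144-block AG with a worst-case
         * rmap btree of 1800 blocks asks for max(2621, 1800) = 2621. */
        uint64_t agblocks = 262144;
        uint64_t rmapbt_max = 1800;
        uint64_t ask = MAX(agblocks / 100, rmapbt_max);

        printf("ask = %llu blocks\n", (unsigned long long)ask);
        return 0;
    }
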
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 2a9ac472fb15..19c08e933049 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
-extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp);
+extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
+ xfs_agblock_t agblocks);
extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 2580262e4ea0..584ec896a533 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
- sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
+ sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
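
The tightened superblock check above bounds the directory block size rather than the multiplier alone: a directory block is sb_blocksize shifted left by sb_dirblklog, so sb_blocklog + sb_dirblklog must stay within XFS_MAX_BLOCKSIZE_LOG. A small worked example (the limit value here is a stand-in for the real constant):

    #include <stdio.h>

    #define MAX_BLOCKSIZE_LOG 16   /* stand-in for XFS_MAX_BLOCKSIZE_LOG */

    int main(void)
    {
        unsigned blocklog = 12;            /* 4096-byte filesystem blocks */
        unsigned dirblklog = 4;            /* 16 fs blocks per dir block  */
        unsigned long dirblksize = 1UL << (blocklog + dirblklog);

        if (blocklog + dirblklog > MAX_BLOCKSIZE_LOG)
            printf("invalid superblock: directory block too large\n");
        else
            printf("directory block size = %lu bytes\n", dirblksize);
        return 0;
    }
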
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0f56fcd3a5d5..631e7c0e0a29 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1152,19 +1152,22 @@ xfs_vm_releasepage(
* block_invalidatepage() can send pages that are still marked dirty
* but otherwise have invalidated buffers.
*
- * We've historically freed buffers on the latter. Instead, quietly
- * filter out all dirty pages to avoid spurious buffer state warnings.
- * This can likely be removed once shrink_active_list() is fixed.
+ * We want to release the latter to avoid unnecessary buildup of the
+ * LRU, skip the former, and warn if we've left any lingering
+ * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
+ * or unwritten buffers and warn if the page is not dirty; otherwise
+ * try to release the buffers.
*/
- if (PageDirty(page))
- return 0;
-
xfs_count_page_state(page, &delalloc, &unwritten);
- if (WARN_ON_ONCE(delalloc))
+ if (delalloc) {
+ WARN_ON_ONCE(!PageDirty(page));
return 0;
- if (WARN_ON_ONCE(unwritten))
+ }
+ if (unwritten) {
+ WARN_ON_ONCE(!PageDirty(page));
return 0;
+ }
return try_to_free_buffers(page);
}
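
The rewritten release logic keeps freeing buffers on ordinary clean pages and only skips pages that still carry delalloc or unwritten buffers, warning when such a page is unexpectedly clean. Reduced to a plain user-space decision helper, the shape is roughly:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the release decision: skip (and warn about) pages with
     * delalloc/unwritten buffers, otherwise try to free the buffers. */
    static bool may_release(bool dirty, bool delalloc, bool unwritten)
    {
        if (delalloc || unwritten) {
            if (!dirty)
                fprintf(stderr,
                        "warning: clean page with delalloc/unwritten buffers\n");
            return false;
        }
        return true;   /* corresponds to calling try_to_free_buffers() */
    }

    int main(void)
    {
        printf("%d %d\n", may_release(true, false, false),
                          may_release(false, true, false));
        return 0;
    }
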
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index b9abce524c33..c1417919ab0a 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -528,7 +528,6 @@ xfs_getbmap(
xfs_bmbt_irec_t *map; /* buffer for user's data */
xfs_mount_t *mp; /* file system mount point */
int nex; /* # of user extents can do */
- int nexleft; /* # of user extents left */
int subnex; /* # of bmapi's can do */
int nmap; /* number of map entries */
struct getbmapx *out; /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
goto out_free_map;
}
- nexleft = nex;
-
do {
- nmap = (nexleft > subnex) ? subnex : nexleft;
+ nmap = (nex > subnex) ? subnex : nex;
error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
XFS_BB_TO_FSB(mp, bmv->bmv_length),
map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
goto out_free_map;
ASSERT(nmap <= subnex);
- for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
- cur_ext < bmv->bmv_count; i++) {
+ for (i = 0; i < nmap && bmv->bmv_length &&
+ cur_ext < bmv->bmv_count - 1; i++) {
out[cur_ext].bmv_oflags = 0;
if (map[i].br_state == XFS_EXT_UNWRITTEN)
out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
continue;
}
+ /*
+ * In order to report shared extents accurately,
+ * we report each distinct shared/unshared part
+ * of a single bmbt record using multiple bmap
+ * extents. To make that happen, we iterate the
+ * same map array item multiple times, each
+ * time trimming out the subextent that we just
+ * reported.
+ *
+ * Because of this, we must check the out array
+ * index (cur_ext) directly against bmv_count-1
+ * to avoid overflows.
+ */
if (inject_map.br_startblock != NULLFSBLOCK) {
map[i] = inject_map;
i--;
- } else
- nexleft--;
+ }
bmv->bmv_entries++;
cur_ext++;
}
- } while (nmap && nexleft && bmv->bmv_length &&
- cur_ext < bmv->bmv_count);
+ } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
out_free_map:
kmem_free(map);
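
Because a single bmbt record can now fan out into several getbmap extents (one per shared/unshared sub-range), the loop above bounds the output index directly against bmv_count - 1 rather than counting down a separate nexleft. The guard is the usual bounded-fan-out pattern; schematically (values invented):

    #include <stdio.h>

    int main(void)
    {
        int out_slots = 3;              /* size of the output array        */
        int cur_ext = 0;                /* next output slot to fill        */
        int produced[] = {1, 2, 2, 1};  /* extents fanned out per record   */

        for (int i = 0; i < 4 && cur_ext < out_slots; i++)
            for (int j = 0; j < produced[i] && cur_ext < out_slots; j++)
                cur_ext++;              /* emit one bmap extent            */

        printf("emitted %d extents into %d slots\n", cur_ext, out_slots);
        return 0;
    }
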
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 7f0a01f7b592..ac3b4db519df 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -422,6 +422,7 @@ retry:
out_free_pages:
for (i = 0; i < bp->b_page_count; i++)
__free_page(bp->b_pages[i]);
+ bp->b_flags &= ~_XBF_PAGES;
return error;
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 7a30b8f11db7..9d06cc30e875 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
/* Simple advance */
next_id = *id + 1;
+ /* If we'd wrap past the max ID, stop */
+ if (next_id < *id)
+ return -ENOENT;
+
/* If new ID is within the current chunk, advancing it sufficed */
if (next_id % mp->m_quotainfo->qi_dqperchunk) {
*id = next_id;
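
The added check stops the next-ID scan once the increment wraps: for an unsigned quota ID, next_id < *id can only hold after overflow. The same idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 and advances *id, or -1 once the id space is exhausted. */
    static int next_id(uint32_t *id)
    {
        uint32_t next = *id + 1;

        if (next < *id)     /* unsigned wrap: we were already at UINT32_MAX */
            return -1;
        *id = next;
        return 0;
    }

    int main(void)
    {
        uint32_t id = UINT32_MAX - 1;

        while (next_id(&id) == 0)
            ;
        printf("stopped at id %u\n", id);   /* prints 4294967295 */
        return 0;
    }
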
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 93d12fa2670d..242e8091296d 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -631,6 +631,20 @@ xfs_growfs_data_private(
xfs_set_low_space_thresholds(mp);
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
+ /*
+ * If we expanded the last AG, free the per-AG reservation
+ * so we can reinitialize it with the new size.
+ */
+ if (new) {
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_ag_resv_free(pag);
+ xfs_perag_put(pag);
+ if (error)
+ goto out;
+ }
+
/* Reserve AG metadata blocks. */
error = xfs_fs_reserve_ag_blocks(mp);
if (error && error != -ENOSPC)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index ff4d6311c7f4..70ca4f608321 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1597,7 +1597,8 @@ xfs_inode_free_cowblocks(
* If the mapping is dirty or under writeback we cannot touch the
* CoW fork. Leave it alone if we're in the midst of a directio.
*/
- if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
+ if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
+ mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
atomic_read(&VFS_I(ip)->i_dio_count))
return 0;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b9557795eb74..de32f0fe47c8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
int error;
/*
- * The ifree transaction might need to allocate blocks for record
- * insertion to the finobt. We don't want to fail here at ENOSPC, so
- * allow ifree to dip into the reserved block pool if necessary.
- *
- * Freeing large sets of inodes generally means freeing inode chunks,
- * directory and file data blocks, so this should be relatively safe.
- * Only under severe circumstances should it be possible to free enough
- * inodes to exhaust the reserve block pool via finobt expansion while
- * at the same time not creating free space in the filesystem.
+ * We try to use a per-AG reservation for any block needed by the finobt
+ * tree, but as the finobt feature predates the per-AG reservation
+ * support a degraded file system might not have enough space for the
+ * reservation at mount time. In that case try to dip into the reserved
+ * pool and pray.
*
* Send a warning if the reservation does happen to fail, as the inode
* now remains allocated and sits on the unlinked list until the fs is
* repaired.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
- XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+ if (unlikely(mp->m_inotbt_nores)) {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+ XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+ &tp);
+ } else {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+ }
if (error) {
if (error == -ENOSPC) {
xfs_warn_ratelimited(mp,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0d147428971e..1aa3abd67b36 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
xfs_trans_t *tp;
int nimaps;
int error = 0;
- int flags = 0;
+ int flags = XFS_BMAPI_DELALLOC;
int nres;
if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 308bebb6dfd2..22c16155f1b4 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -98,12 +98,27 @@ xfs_init_security(
static void
xfs_dentry_to_name(
struct xfs_name *namep,
+ struct dentry *dentry)
+{
+ namep->name = dentry->d_name.name;
+ namep->len = dentry->d_name.len;
+ namep->type = XFS_DIR3_FT_UNKNOWN;
+}
+
+static int
+xfs_dentry_mode_to_name(
+ struct xfs_name *namep,
struct dentry *dentry,
int mode)
{
namep->name = dentry->d_name.name;
namep->len = dentry->d_name.len;
- namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
+ namep->type = xfs_mode_to_ftype(mode);
+
+ if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
+ return -EFSCORRUPTED;
+
+ return 0;
}
STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
* xfs_init_security we must back out.
* ENOSPC can hit here, among other things.
*/
- xfs_dentry_to_name(&teardown, dentry, 0);
+ xfs_dentry_to_name(&teardown, dentry);
xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}
@@ -154,8 +169,12 @@ xfs_generic_create(
if (error)
return error;
+ /* Verify mode is valid also for tmpfile case */
+ error = xfs_dentry_mode_to_name(&name, dentry, mode);
+ if (unlikely(error))
+ goto out_free_acl;
+
if (!tmpfile) {
- xfs_dentry_to_name(&name, dentry, mode);
error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
} else {
error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
if (dentry->d_name.len >= MAXNAMELEN)
return ERR_PTR(-ENAMETOOLONG);
- xfs_dentry_to_name(&name, dentry, 0);
+ xfs_dentry_to_name(&name, dentry);
error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
if (unlikely(error)) {
if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
if (dentry->d_name.len >= MAXNAMELEN)
return ERR_PTR(-ENAMETOOLONG);
- xfs_dentry_to_name(&xname, dentry, 0);
+ xfs_dentry_to_name(&xname, dentry);
error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
if (unlikely(error)) {
if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
struct xfs_name name;
int error;
- xfs_dentry_to_name(&name, dentry, inode->i_mode);
+ error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
+ if (unlikely(error))
+ return error;
error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
struct xfs_name name;
int error;
- xfs_dentry_to_name(&name, dentry, 0);
+ xfs_dentry_to_name(&name, dentry);
error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
mode = S_IFLNK |
(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
- xfs_dentry_to_name(&name, dentry, mode);
+ error = xfs_dentry_mode_to_name(&name, dentry, mode);
+ if (unlikely(error))
+ goto out;
error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
{
struct inode *new_inode = d_inode(ndentry);
int omode = 0;
+ int error;
struct xfs_name oname;
struct xfs_name nname;
@@ -405,8 +429,14 @@ xfs_vn_rename(
if (flags & RENAME_EXCHANGE)
omode = d_inode(ndentry)->i_mode;
- xfs_dentry_to_name(&oname, odentry, omode);
- xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
+ error = xfs_dentry_mode_to_name(&oname, odentry, omode);
+ if (omode && unlikely(error))
+ return error;
+
+ error = xfs_dentry_mode_to_name(&nname, ndentry,
+ d_inode(odentry)->i_mode);
+ if (unlikely(error))
+ return error;
return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
XFS_I(ndir), &nname,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index e467218c0098..7a989de224f4 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
}
#define ASSERT_ALWAYS(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifdef DEBUG
#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
#ifdef XFS_WARN
#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC static noinline
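
The hunk above fixes an inverted branch hint: ASSERT(expr) expects expr to be true almost always, so the hint belongs on the success path (likely), leaving assfail()/asswarn() on the cold path. A stripped-down user-space version of the same macro shape (using fprintf/abort in place of the kernel helpers):

    #include <stdio.h>
    #include <stdlib.h>

    #define my_likely(x)   __builtin_expect(!!(x), 1)

    /* The expression is expected to hold; only the failure path is cold. */
    #define MY_ASSERT(expr) \
        (my_likely(expr) ? (void)0 \
                         : (fprintf(stderr, "Assertion failed: %s, %s:%d\n", \
                                    #expr, __FILE__, __LINE__), abort()))

    int main(void)
    {
        int x = 42;

        MY_ASSERT(x == 42);   /* hinted-true fast path */
        printf("ok\n");
        return 0;
    }
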
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c39ac14ff540..b1469f0a91a6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3317,12 +3317,8 @@ xfs_log_force(
xfs_mount_t *mp,
uint flags)
{
- int error;
-
trace_xfs_log_force(mp, 0, _RET_IP_);
- error = _xfs_log_force(mp, flags, NULL);
- if (error)
- xfs_warn(mp, "%s: error %d returned.", __func__, error);
+ _xfs_log_force(mp, flags, NULL);
}
/*
@@ -3466,12 +3462,8 @@ xfs_log_force_lsn(
xfs_lsn_t lsn,
uint flags)
{
- int error;
-
trace_xfs_log_force(mp, lsn, _RET_IP_);
- error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
- if (error)
- xfs_warn(mp, "%s: error %d returned.", __func__, error);
+ _xfs_log_force_lsn(mp, lsn, flags, NULL);
}
/*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 84f785218907..7f351f706b7a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_dmevmask; /* DMI events for this FS */
__uint64_t m_flags; /* global mount flags */
+ bool m_inotbt_nores; /* no per-AG finobt resv. */
int m_ialloc_inos; /* inodes in inode allocation */
int m_ialloc_blks; /* blocks in inode allocation */
int m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 45e50ea90769..b669b123287b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
- error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
+ error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
+ &ip);
if (error) {
*res = BULKSTAT_RV_NOTHING;
return error;
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index fe86a668a57e..6e4c7446c3d4 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -526,13 +526,14 @@ xfs_cui_recover(
xfs_refcount_finish_one_cleanup(tp, rcur, error);
error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
- goto abort_error;
+ goto abort_defer;
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
error = xfs_trans_commit(tp);
return error;
abort_error:
xfs_refcount_finish_one_cleanup(tp, rcur, error);
+abort_defer:
xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
return error;
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 276d3023d60f..de6195e38910 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -396,7 +396,7 @@ max_retries_show(
int retries;
struct xfs_error_cfg *cfg = to_error_cfg(kobject);
- if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
+ if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
retries = -1;
else
retries = cfg->max_retries;
@@ -422,7 +422,7 @@ max_retries_store(
return -EINVAL;
if (val == -1)
- cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
+ cfg->max_retries = XFS_ERR_RETRY_FOREVER;
else
cfg->max_retries = val;
return count;
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
index df13637e4017..939869c772b1 100644
--- a/include/asm-generic/asm-prototypes.h
+++ b/include/asm-generic/asm-prototypes.h
@@ -1,7 +1,13 @@
#include <linux/bitops.h>
+#undef __memset
extern void *__memset(void *, int, __kernel_size_t);
+#undef __memcpy
extern void *__memcpy(void *, const void *, __kernel_size_t);
+#undef __memmove
extern void *__memmove(void *, const void *, __kernel_size_t);
+#undef memset
extern void *memset(void *, int, __kernel_size_t);
+#undef memcpy
extern void *memcpy(void *, const void *, __kernel_size_t);
+#undef memmove
extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d6d241f63b9f..56814e8ae7ea 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state;
struct drm_crtc_commit *commit;
- s64 __user *out_fence_ptr;
+ s32 __user *out_fence_ptr;
};
struct __drm_connnectors_state {
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index bf9991b20611..137432386310 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -488,7 +488,7 @@ struct drm_mode_config {
/**
* @prop_out_fence_ptr: Sync File fd pointer representing the
* outgoing fences for a CRTC. Userspace should provide a pointer to a
- * value of type s64, and then cast that pointer to u64.
+ * value of type s32, and then cast that pointer to u64.
*/
struct drm_property *prop_out_fence_ptr;
/**
diff --git a/include/dt-bindings/mfd/tps65217.h b/include/dt-bindings/mfd/tps65217.h
deleted file mode 100644
index cafb9e60cf12..000000000000
--- a/include/dt-bindings/mfd/tps65217.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This header provides macros for TI TPS65217 DT bindings.
- *
- * Copyright (C) 2016 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DT_BINDINGS_TPS65217_H__
-#define __DT_BINDINGS_TPS65217_H__
-
-#define TPS65217_IRQ_USB 0
-#define TPS65217_IRQ_AC 1
-#define TPS65217_IRQ_PB 2
-
-#endif
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b717ed9d2b75..5c970ce67949 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
+void kvm_timer_init_vhe(void);
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 83695641bd5e..1ca8e8fd1078 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q)
- return blk_queue_zone_size(q);
+ return blk_queue_zone_sectors(q);
return 0;
}
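
The blk_rq_payload_bytes() helper added earlier in this file's hunks encodes a selection drivers otherwise get wrong: commands such as WRITE SAME move a payload whose length differs from the request's data length, and RQF_SPECIAL_PAYLOAD says which one applies. A toy model with invented structures standing in for struct request:

    #include <stdio.h>

    #define RQF_SPECIAL_PAYLOAD 0x1   /* stand-in flag value */

    struct toy_request {
        unsigned int flags;
        unsigned int data_len;        /* size of the request itself      */
        unsigned int special_len;     /* payload length, if special      */
    };

    /* Pick the transfer size the hardware will actually move. */
    static unsigned int payload_bytes(const struct toy_request *rq)
    {
        if (rq->flags & RQF_SPECIAL_PAYLOAD)
            return rq->special_len;
        return rq->data_len;
    }

    int main(void)
    {
        struct toy_request write_same = { RQF_SPECIAL_PAYLOAD, 1 << 20, 512 };
        struct toy_request plain_read = { 0, 4096, 0 };

        printf("%u %u\n", payload_bytes(&write_same), payload_bytes(&plain_read));
        return 0;
    }
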
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f74ae68086dc..3ed1f3b1d594 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-int bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a121a8c4..28ffa94aed6b 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@ struct coredump_params;
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefbe7594..d936a0021839 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -74,6 +74,8 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_MIPS_SOC_PREPARE,
+ CPUHP_BP_PREPARE_DYN,
+ CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
diff --git a/include/linux/dax.h b/include/linux/dax.h
index f97bcfe79472..24ad71173995 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+ pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index a07a476178cd..5b1af30ece55 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -103,6 +103,7 @@ typedef struct {
#define EFI_PAGE_SHIFT 12
#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 702314253797..e4eb2546339a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,8 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
+#define BPF_TAG_SIZE 8
+
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +410,7 @@ struct bpf_prog {
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
- u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
+ u8 tag[BPF_TAG_SIZE];
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,
@@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
@@ -610,7 +612,6 @@ bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act);
-void bpf_warn_invalid_xdp_buffer(void);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0cf34d6cc253..487246546ebe 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
/* find (and take a reference) to a mark associated with group and vfsmount */
extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* copy the values from old into new */
-extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
/* set the ignored_mask of a mark */
extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* set the mask of a mark (might pin the object into memory */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e0341af6950e..76f39754e7b0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -146,15 +146,6 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
- struct kobject kobj;
-};
-
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4175dca4ac39..0fe0b6295ab5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -38,9 +38,8 @@ struct vm_area_struct;
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
-#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_WRITE 0x800000u
+#define ___GFP_KSWAPD_RECLAIM 0x1000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -172,11 +171,6 @@ struct vm_area_struct;
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- * should not be accounted for as a remote allocation in vmstat. A
- * typical user would be khugepaged collapsing a huge page on a remote
- * node.
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
- unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748accea71..846f3b989480 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -8,6 +8,7 @@
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
struct gpio_desc;
struct of_phandle_args;
@@ -19,18 +20,6 @@ struct module;
#ifdef CONFIG_GPIOLIB
/**
- * enum single_ended_mode - mode for single ended operation
- * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low
- * @LINE_MODE_OPEN_DRAIN: set line to be open drain
- * @LINE_MODE_OPEN_SOURCE: set line to be open source
- */
-enum single_ended_mode {
- LINE_MODE_PUSH_PULL,
- LINE_MODE_OPEN_DRAIN,
- LINE_MODE_OPEN_SOURCE,
-};
-
-/**
* struct gpio_chip - abstract a GPIO controller
* @label: a functional name for the GPIO device, such as a part
* number or the name of the SoC IP-block implementing it.
@@ -48,16 +37,8 @@ enum single_ended_mode {
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @set: assigns output value for signal "offset"
* @set_multiple: assigns output values for multiple signals defined by "mask"
- * @set_debounce: optional hook for setting debounce time for specified gpio in
- * interrupt triggered gpio chips
- * @set_single_ended: optional hook for setting a line as open drain, open
- * source, or non-single ended (restore from open drain/source to normal
- * push-pull mode) this should be implemented if the hardware supports
- * open drain or open source settings. The GPIOlib will otherwise try
- * to emulate open drain/source by not actively driving lines high/low
- * if a consumer request this. The driver may return -ENOTSUPP if e.g.
- * it supports just open drain but not open source and is called
- * with LINE_MODE_OPEN_SOURCE as mode argument.
+ * @set_config: optional hook for all kinds of settings. Uses the same
+ * packed config format as generic pinconf.
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
@@ -150,13 +131,9 @@ struct gpio_chip {
void (*set_multiple)(struct gpio_chip *chip,
unsigned long *mask,
unsigned long *bits);
- int (*set_debounce)(struct gpio_chip *chip,
- unsigned offset,
- unsigned debounce);
- int (*set_single_ended)(struct gpio_chip *chip,
- unsigned offset,
- enum single_ended_mode mode);
-
+ int (*set_config)(struct gpio_chip *chip,
+ unsigned offset,
+ unsigned long config);
int (*to_irq)(struct gpio_chip *chip,
unsigned offset);
@@ -274,42 +251,74 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
int parent_irq);
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ bool nested,
+ struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. These upfront
+ * boilerplate static inlines provide such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ static struct lock_class_key key;
+
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key);
+ unsigned int type)
+{
+
+ static struct lock_class_key key;
+
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, false, NULL);
+}
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
- return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
- handler, type, true, NULL);
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, true, NULL);
}
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...) \
-( \
- ({ \
- static struct lock_class_key _key; \
- _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
- }) \
-)
-#else
-#define gpiochip_irqchip_add(...) \
- _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
#endif /* CONFIG_GPIOLIB_IRQCHIP */
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
+int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+ unsigned long config);
#ifdef CONFIG_PINCTRL
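
The new gpiochip_irqchip_add()/_nested() wrappers lean on a property of static inline functions that is easy to miss: a static local inside such a function is instantiated once per translation unit, which is what gives each driver its own lock_class_key under lockdep. A minimal user-space illustration of that pattern (not the kernel API):

    #include <stdio.h>

    struct lock_class_key { int dummy; };

    static void irqchip_add_key(const char *name, struct lock_class_key *key)
    {
        printf("%s registered with key %p\n", name, (void *)key);
    }

    /*
     * Mimics the kernel wrapper: the static local inside the static inline
     * gives every translation unit that calls it its own key object, so
     * lockdep can tell the irqchip instances apart.
     */
    static inline void irqchip_add(const char *name)
    {
        static struct lock_class_key key;

        irqchip_add_key(name, &key);
    }

    int main(void)
    {
        irqchip_add("gpio-a");
        irqchip_add("gpio-b");   /* same TU, same key; other drivers get their own */
        return 0;
    }
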
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b2109c522dec..4b45ec46161f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -665,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 228bd44efa4c..497f2b3a5a62 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -116,6 +116,16 @@ struct st_sensor_bdu {
};
/**
+ * struct st_sensor_das - ST sensor device data alignment selection
+ * @addr: address of the register.
+ * @mask: mask to write the das flag for left alignment.
+ */
+struct st_sensor_das {
+ u8 addr;
+ u8 mask;
+};
+
+/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
* @addr: address of the register.
* @mask_int1: mask to enable/disable IRQ on INT1 pin.
@@ -185,6 +195,7 @@ struct st_sensor_transfer_function {
* @enable_axis: Enable one or more axis of the sensor.
* @fs: Full scale register and full scale list available.
* @bdu: Block data update register.
+ * @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
@@ -200,6 +211,7 @@ struct st_sensor_settings {
struct st_sensor_axis enable_axis;
struct st_sensor_fullscale fs;
struct st_sensor_bdu bdu;
+ struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
bool multi_read_bit;
unsigned int bootime;
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 089f70f83e97..23da3af459fe 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {
#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84237ad..cb09238f6d32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
#define TAINT_FLAGS_COUNT 16
struct taint_flag {
- char true; /* character printed when tainted */
- char false; /* character printed when not tainted */
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
bool module; /* also show as a per-module taint flag */
};
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index ec819e9a115a..b6e048e1045f 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -13,34 +13,10 @@
#ifndef MDEV_H
#define MDEV_H
-/* Parent device */
-struct parent_device {
- struct device *dev;
- const struct parent_ops *ops;
-
- /* internal */
- struct kref ref;
- struct mutex lock;
- struct list_head next;
- struct kset *mdev_types_kset;
- struct list_head type_list;
-};
-
-/* Mediated device */
-struct mdev_device {
- struct device dev;
- struct parent_device *parent;
- uuid_le uuid;
- void *driver_data;
-
- /* internal */
- struct kref ref;
- struct list_head next;
- struct kobject *type_kobj;
-};
+struct mdev_device;
/**
- * struct parent_ops - Structure to be registered for each parent device to
+ * struct mdev_parent_ops - Structure to be registered for each parent device to
* register the device to mdev module.
*
* @owner: The module owner.
@@ -86,10 +62,9 @@ struct mdev_device {
* @mdev: mediated device structure
* @vma: vma structure
* Parent device that support mediated device should be registered with mdev
- * module with parent_ops structure.
+ * module with mdev_parent_ops structure.
**/
-
-struct parent_ops {
+struct mdev_parent_ops {
struct module *owner;
const struct attribute_group **dev_attr_groups;
const struct attribute_group **mdev_attr_groups;
@@ -103,7 +78,7 @@ struct parent_ops {
size_t count, loff_t *ppos);
ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
size_t count, loff_t *ppos);
- ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
+ long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg);
int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
};
@@ -142,27 +117,22 @@ struct mdev_driver {
};
#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
-
-static inline void *mdev_get_drvdata(struct mdev_device *mdev)
-{
- return mdev->driver_data;
-}
-static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
-{
- mdev->driver_data = data;
-}
+extern void *mdev_get_drvdata(struct mdev_device *mdev);
+extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
+extern uuid_le mdev_uuid(struct mdev_device *mdev);
extern struct bus_type mdev_bus_type;
-#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
-
extern int mdev_register_device(struct device *dev,
- const struct parent_ops *ops);
+ const struct mdev_parent_ops *ops);
extern void mdev_unregister_device(struct device *dev);
extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
extern void mdev_unregister_driver(struct mdev_driver *drv);
+extern struct device *mdev_parent_dev(struct mdev_device *mdev);
+extern struct device *mdev_dev(struct mdev_device *mdev);
+extern struct mdev_device *mdev_from_dev(struct device *dev);
+
#endif /* MDEV_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 61d20c17f3b7..254698856b8f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- unsigned long lru_size[NR_LRU_LISTS];
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int nr_pages);
+ int zid, int nr_pages);
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
struct mem_cgroup_per_node *mz;
+ unsigned long nr_pages = 0;
+ int zid;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_size[lru];
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ nr_pages += mz->lru_zone_size[zid][lru];
+ return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
+{
+ struct mem_cgroup_per_node *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ return mz->lru_zone_size[zone_idx][lru];
}
void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
+{
+ return 0;
+}
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
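
The memcontrol.h changes split the per-node LRU size into a per-zone array, with mem_cgroup_get_lru_size() summing across zones and a new per-zone accessor beside it. Stripped of the cgroup machinery, the bookkeeping looks roughly like this (zone and list counts are placeholders):

    #include <stdio.h>

    #define MAX_NR_ZONES 4
    #define NR_LRU_LISTS 5

    /* Per-node counters, now kept per zone as in the patched structure. */
    static unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

    static unsigned long get_lru_size(int lru)
    {
        unsigned long nr_pages = 0;

        for (int zid = 0; zid < MAX_NR_ZONES; zid++)
            nr_pages += lru_zone_size[zid][lru];
        return nr_pages;
    }

    static unsigned long get_zone_lru_size(int lru, int zid)
    {
        return lru_zone_size[zid][lru];
    }

    int main(void)
    {
        lru_zone_size[0][1] = 100;   /* e.g. lowest zone, one LRU list   */
        lru_zone_size[3][1] = 400;   /* e.g. highest zone, same LRU list */

        printf("total %lu, zone 3 only %lu\n",
               get_lru_size(1), get_zone_lru_size(1, 3));
        return 0;
    }
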
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea47..c1784c0b4f35 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+ enum zone_type target, int *zone_shift);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e0095e..f541da68d1e7 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
+#define PHY_ID_KSZ8795 0x00221550
+
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 93bdb3485192..6533c16e27ad 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 9f489365b3d3..52b437431c6a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1071,11 +1071,6 @@ enum {
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
-enum {
- MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
- MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
-};
-
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0ae55361e674..735b36335f29 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -123,7 +123,6 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
- MLX5_REG_MPCNT = 0x9051,
};
enum mlx5_dcbx_oper_mode {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 57bec544e20a..a852e9db6f0d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1757,80 +1757,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
-struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
- u8 life_time_counter_high[0x20];
-
- u8 life_time_counter_low[0x20];
-
- u8 rx_errors[0x20];
-
- u8 tx_errors[0x20];
-
- u8 l0_to_recovery_eieos[0x20];
-
- u8 l0_to_recovery_ts[0x20];
-
- u8 l0_to_recovery_framing[0x20];
-
- u8 l0_to_recovery_retrain[0x20];
-
- u8 crc_error_dllp[0x20];
-
- u8 crc_error_tlp[0x20];
-
- u8 reserved_at_140[0x680];
-};
-
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
- u8 life_time_counter_high[0x20];
-
- u8 life_time_counter_low[0x20];
-
- u8 time_to_boot_image_start[0x20];
-
- u8 time_to_link_image[0x20];
-
- u8 calibration_time[0x20];
-
- u8 time_to_first_perst[0x20];
-
- u8 time_to_detect_state[0x20];
-
- u8 time_to_l0[0x20];
-
- u8 time_to_crs_en[0x20];
-
- u8 time_to_plastic_image_start[0x20];
-
- u8 time_to_iron_image_start[0x20];
-
- u8 perst_handler[0x20];
-
- u8 times_in_l1[0x20];
-
- u8 times_in_l23[0x20];
-
- u8 dl_down[0x20];
-
- u8 config_cycle1usec[0x20];
-
- u8 config_cycle2to7usec[0x20];
-
- u8 config_cycle_8to15usec[0x20];
-
- u8 config_cycle_16_to_63usec[0x20];
-
- u8 config_cycle_64usec[0x20];
-
- u8 correctable_err_msg_sent[0x20];
-
- u8 non_fatal_err_msg_sent[0x20];
-
- u8 fatal_err_msg_sent[0x20];
-
- u8 reserved_at_2e0[0x4e0];
-};
-
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@@ -2995,12 +2921,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
u8 reserved_at_0[0x7c0];
};
-union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
- struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
- struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
- u8 reserved_at_0[0x7c0];
-};
-
union mlx5_ifc_event_auto_bits {
struct mlx5_ifc_comp_event_bits comp_event;
struct mlx5_ifc_dct_events_bits dct_events;
@@ -7320,18 +7240,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
-struct mlx5_ifc_mpcnt_reg_bits {
- u8 reserved_at_0[0x8];
- u8 pcie_index[0x8];
- u8 reserved_at_10[0xa];
- u8 grp[0x6];
-
- u8 clr[0x1];
- u8 reserved_at_21[0x1f];
-
- union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
@@ -7937,7 +7845,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
- struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fe6b4036664a..b84615b0f64c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
- spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 71613e8a720f..41d376e7116d 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
- mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+ mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896fbc1e..f4aac87adcc3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* @zonelist - The zonelist to search for a suitable zone
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to a concurrent
+ * nodemask update caused by a cpuset modification.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
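
The clarified kerneldoc above spells out the contract: first_zones_zonelist() returns a zoneref cursor that is never NULL, but zoneref->zone may be NULL when nothing qualifies, so callers must test the zone rather than the cursor. Sketched as a generic cursor idiom with invented types:

    #include <stdio.h>
    #include <stddef.h>

    struct zone { const char *name; };
    struct zoneref { struct zone *zone; };

    /* Returns a cursor that is never NULL; ->zone is NULL when no zone fits. */
    static struct zoneref *first_fit(struct zoneref *list, int allowed)
    {
        (void)allowed;
        return list;          /* may point at a terminator with zone == NULL */
    }

    int main(void)
    {
        struct zoneref empty_list[] = { { NULL } };
        struct zoneref *z = first_fit(empty_list, 0);

        if (!z->zone)
            printf("no eligible zone\n");   /* the check callers must make */
        return 0;
    }
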
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 994f7423a74b..9bde9558b596 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2477,14 +2477,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
if (!pskb_may_pull(skb, hlen))
return NULL;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bca536341d1a..1b1ca04820a3 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -282,7 +282,7 @@ enum nfsstat4 {
static inline bool seqid_mutating_err(u32 err)
{
- /* rfc 3530 section 8.1.5: */
+ /* See RFC 7530, section 9.1.7 */
switch (err) {
case NFS4ERR_STALE_CLIENTID:
case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_BADXDR:
case NFS4ERR_RESOURCE:
case NFS4ERR_NOFILEHANDLE:
+ case NFS4ERR_MOVED:
return false;
};
return true;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6ae..0a3fadc32693 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c56b39890a41..6b5818d6de32 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -73,13 +73,13 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
- PG_waiters, /* Page has waiters, check its waitqueue */
PG_error,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
+ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4741ecdb9817..78ed8105e64d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f7d95f644eed..7fc1105605bf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>
#include <linux/atomic.h>
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0a37d2..b37b05bfd1a6 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
#ifdef CONFIG_LED_TRIGGER_PHY
#include <linux/leds.h>
+#include <linux/phy.h>
#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
-#define PHY_MII_BUS_ID_SIZE (20 - 3)
-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index d7e5d608faa7..a0f2aba72fa9 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -29,6 +29,7 @@ extern int pinctrl_request_gpio(unsigned gpio);
extern void pinctrl_free_gpio(unsigned gpio);
extern int pinctrl_gpio_direction_input(unsigned gpio);
extern int pinctrl_gpio_direction_output(unsigned gpio);
+extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
extern void pinctrl_put(struct pinctrl *p);
@@ -80,6 +81,11 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
return 0;
}
+static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+{
+ return 0;
+}
+
static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
{
return NULL;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 12343caa114e..7620eb127cff 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -12,12 +12,6 @@
#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
#define __LINUX_PINCTRL_PINCONF_GENERIC_H
-/*
- * You shouldn't even be able to compile with these enums etc unless you're
- * using generic pin config. That is why this is defined out.
- */
-#ifdef CONFIG_GENERIC_PINCONF
-
/**
* enum pin_config_param - possible pin configuration parameters
* @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
@@ -92,6 +86,8 @@
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
+ * @PIN_CONFIG_MAX: this is the maximum configuration value that can be
+ * presented using the packed format.
*/
enum pin_config_param {
PIN_CONFIG_BIAS_BUS_HOLD,
@@ -112,49 +108,53 @@ enum pin_config_param {
PIN_CONFIG_OUTPUT,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SLEW_RATE,
- PIN_CONFIG_END = 0x7FFF,
-};
-
-#ifdef CONFIG_DEBUG_FS
-#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
- .has_arg = d }
-
-struct pin_config_item {
- const enum pin_config_param param;
- const char * const display;
- const char * const format;
- bool has_arg;
+ PIN_CONFIG_END = 0x7F,
+ PIN_CONFIG_MAX = 0xFF,
};
-#endif /* CONFIG_DEBUG_FS */
/*
* Helpful configuration macro to be used in tables etc.
*/
-#define PIN_CONF_PACKED(p, a) ((a << 16) | ((unsigned long) p & 0xffffUL))
+#define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL))
/*
* The following inlines stuffs a configuration parameter and data value
* into and out of an unsigned long argument, as used by the generic pin config
- * system. We put the parameter in the lower 16 bits and the argument in the
- * upper 16 bits.
+ * system. We put the parameter in the lower 8 bits and the argument in the
+ * upper 24 bits.
*/
static inline enum pin_config_param pinconf_to_config_param(unsigned long config)
{
- return (enum pin_config_param) (config & 0xffffUL);
+ return (enum pin_config_param) (config & 0xffUL);
}
-static inline u16 pinconf_to_config_argument(unsigned long config)
+static inline u32 pinconf_to_config_argument(unsigned long config)
{
- return (enum pin_config_param) ((config >> 16) & 0xffffUL);
+ return (u32) ((config >> 8) & 0xffffffUL);
}
static inline unsigned long pinconf_to_config_packed(enum pin_config_param param,
- u16 argument)
+ u32 argument)
{
return PIN_CONF_PACKED(param, argument);
}
+#ifdef CONFIG_GENERIC_PINCONF
+
+#ifdef CONFIG_DEBUG_FS
+#define PCONFDUMP(a, b, c, d) { \
+ .param = a, .display = b, .format = c, .has_arg = d \
+ }
+
+struct pin_config_item {
+ const enum pin_config_param param;
+ const char * const display;
+ const char * const format;
+ bool has_arg;
+};
+#endif /* CONFIG_DEBUG_FS */
+
#ifdef CONFIG_OF
#include <linux/device.h>
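A minimal sketch of the new packed layout (illustrative only; demo_pinconf_pack is a made-up name and PIN_CONFIG_INPUT_DEBOUNCE is one of the generic parameters not shown in this hunk): the parameter now occupies the low 8 bits and the argument the upper 24, so arguments larger than 16 bits survive the round trip.

#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>

static void demo_pinconf_pack(void)
{
	/* Pack a debounce time that would not have fit in the old u16 argument. */
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
						     500000);
	enum pin_config_param param = pinconf_to_config_param(cfg);
	u32 arg = pinconf_to_config_argument(cfg);	/* 500000 again */

	pr_info("param=%d arg=%u\n", param, arg);
}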
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index a42e57da270d..8ce2d87a238b 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -141,12 +141,27 @@ struct pinctrl_desc {
};
/* External interface to pin controller */
+
+extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data,
+ struct pinctrl_dev **pctldev);
+
+/* Please use pinctrl_register_and_init() instead */
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
+
extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+
+extern int devm_pinctrl_register_and_init(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data,
+ struct pinctrl_dev **pctldev);
+
+/* Please use devm_pinctrl_register_and_init() instead */
extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
struct pinctrl_desc *pctldesc,
void *driver_data);
+
extern void devm_pinctrl_unregister(struct device *dev,
struct pinctrl_dev *pctldev);
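A hedged sketch of how a driver might adopt the preferred variant (foo_pinctrl_desc and foo_probe are hypothetical names; descriptor setup and further error handling are omitted):

#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

static struct pinctrl_desc foo_pinctrl_desc;	/* hypothetical descriptor */

static int foo_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctldev;

	/* Unlike devm_pinctrl_register(), errors come back as an int and the
	 * controller handle is returned through the last argument. */
	return devm_pinctrl_register_and_init(&pdev->dev, &foo_pinctrl_desc,
					      NULL, &pctldev);
}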
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5dea8f6440e4..52bda854593b 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -306,7 +306,9 @@ void radix_tree_iter_replace(struct radix_tree_root *,
void radix_tree_replace_slot(struct radix_tree_root *root,
void **slot, void *item);
void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node);
+ struct radix_tree_node *node,
+ radix_tree_update_node_t update_node,
+ void *private);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed552a9..01f71e1d2e94 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
#error "Unknown RCU implementation specified to kernel configuration"
#endif
+#define RCU_SCHEDULER_INACTIVE 0
+#define RCU_SCHEDULER_INIT 1
+#define RCU_SCHEDULER_RUNNING 2
+
/*
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
* initialization and destruction of rcu_head on the stack. rcu_head structures
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e2f3a3281d8f..8265d351c9f0 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -408,7 +408,8 @@ enum rproc_crash_type {
* @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
+ struct resource_table *cached_table;
bool has_iommu;
bool auto_boot;
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d1905245c7a..ad3ec9ec61f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -854,6 +854,16 @@ struct signal_struct {
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+ SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+ unsigned int flags)
+{
+ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b53c0cfd417e..a410715bbef8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
static inline void skb_free_frag(void *addr)
{
- __free_page_frag(addr);
+ page_frag_free(addr);
}
void *napi_alloc_frag(unsigned int fragsz);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 084b12bad198..4c5363566815 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index e2e9de1acc5b..e57eb4b6cc5a 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -12,6 +12,8 @@
#ifndef __LINUX_SOC_EXYNOS_PMU_H
#define __LINUX_SOC_EXYNOS_PMU_H
+struct regmap;
+
enum sys_powerdown {
SYS_AFTR,
SYS_LPA,
@@ -20,5 +22,13 @@ enum sys_powerdown {
};
extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
+#ifdef CONFIG_EXYNOS_PMU
+extern struct regmap *exynos_get_pmu_regmap(void);
+#else
+static inline struct regmap *exynos_get_pmu_regmap(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __LINUX_SOC_EXYNOS_PMU_H */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 85cc819676e8..333ad11b3dd9 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
+void rpc_cleanup_clids(void);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index e5d193440374..7440290f64ac 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
#define XPT_LISTENER 10 /* listening endpoint */
#define XPT_CACHE_AUTH 11 /* cache auth info */
#define XPT_LOCAL 12 /* connection from loopback interface */
+#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3c8549..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 09f4be179ff3..7f47b7098b1b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -150,8 +150,9 @@ enum {
SWP_FILE = (1 << 7), /* set after swap_activate success */
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
+ SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
/* add others here before... */
- SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 183f37c8a5e1..4ee479f2f355 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -9,7 +9,13 @@ struct device;
struct page;
struct scatterlist;
-extern int swiotlb_force;
+enum swiotlb_force {
+ SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
+ SWIOTLB_FORCE, /* swiotlb=force */
+ SWIOTLB_NO_FORCE, /* swiotlb=noforce */
+};
+
+extern enum swiotlb_force swiotlb_force;
/*
* Maximum allowable number of contiguous slabs to map,
@@ -108,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
#else
static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
#endif /* __LINUX_SWIOTLB_H */
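For illustration (demo_cap_sg_size is a made-up helper): the CONFIG_SWIOTLB=n stub returns 0, so a caller can treat 0 as "no bounce-buffer limit" and otherwise cap its scatter-gather segment size.

#include <linux/kernel.h>
#include <linux/swiotlb.h>

static unsigned int demo_cap_sg_size(unsigned int hw_max_seg)
{
	unsigned int lim = swiotlb_max_segment();

	return lim ? min(hw_max_seg, lim) : hw_max_seg;
}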
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fc5848dad7a4..c93f4b3a59cb 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
+ union {
+ u8 val[TCP_FASTOPEN_COOKIE_MAX];
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr addr;
+#endif
+ };
s8 len;
- u8 val[TCP_FASTOPEN_COOKIE_MAX];
bool exp; /* In RFC6994 experimental option format */
};
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index bd36ce431e32..bab0b1ad0613 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -8,23 +8,7 @@
#ifndef _LINUX_TIMERFD_H
#define _LINUX_TIMERFD_H
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
/* Flags for timerfd_create. */
@@ -32,6 +16,4 @@
/* Flags for timerfd_settime. */
#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
-#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
-
#endif /* _LINUX_TIMERFD_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 66204007d7ac..5209b5ed2a64 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
- bool little_endian)
+ bool little_endian,
+ bool has_data_valid)
{
memset(hdr, 0, sizeof(*hdr)); /* no info leak */
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
skb_checksum_start_offset(skb));
hdr->csum_offset = __cpu_to_virtio16(little_endian,
skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ } else if (has_data_valid &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
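The intent of the new argument appears to be that DATA_VALID is only advertised toward a receiver, never on a transmit path; a hedged wrapper sketch (demo_fill_hdr is a made-up name):

#include <linux/skbuff.h>
#include <linux/virtio_net.h>

static int demo_fill_hdr(const struct sk_buff *skb, struct virtio_net_hdr *hdr,
			 bool little_endian, bool to_receiver)
{
	/* Pass has_data_valid = true only when handing the packet to a
	 * receiver (e.g. guest/user RX); transmit paths pass false. */
	return virtio_net_hdr_from_skb(skb, hdr, little_endian, to_receiver);
}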
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 487e57391664..7afe991e900e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
* upper-layer output functions
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass);
+ __u32 mark, struct ipv6_txoptions *opt, int tclass);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index d4c1c75b8862..73dd87647460 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
int (*xmit)(struct sk_buff *skb);
+
+ struct module *owner;
};
#ifdef CONFIG_LWTUNNEL
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
+int lwtunnel_valid_encap_type(u16 encap_type);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
@@ -168,6 +172,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
return -EOPNOTSUPP;
}
+static inline int lwtunnel_valid_encap_type(u16 encap_type)
+{
+ return -EOPNOTSUPP;
+}
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 924325c46aab..7dfdb517f0be 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -207,9 +207,9 @@ struct nft_set_iter {
unsigned int skip;
int err;
int (*fn)(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem);
+ struct nft_set_elem *elem);
};
/**
@@ -301,7 +301,7 @@ struct nft_set_ops {
void (*remove)(const struct nft_set *set,
const struct nft_set_elem *elem);
void (*walk)(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
struct nft_set_iter *iter);
unsigned int (*privsize)(const struct nlattr * const nla[]);
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index cbedda077db2..5ceb2205e4e3 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -9,6 +9,12 @@ struct nft_fib {
extern const struct nla_policy nft_fib_policy[];
+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index f0cf5a1b777e..0378e88f6fd3 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -110,6 +110,7 @@ struct netns_ipv4 {
int sysctl_tcp_orphan_retries;
int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat;
+ int sysctl_tcp_tw_reuse;
int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 207147b4c6b2..6061963cca98 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
-extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 958a24d8fae7..b567e4452a47 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
}
}
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+ if (mtu >= 4096)
+ return IB_MTU_4096;
+ else if (mtu >= 2048)
+ return IB_MTU_2048;
+ else if (mtu >= 1024)
+ return IB_MTU_1024;
+ else if (mtu >= 512)
+ return IB_MTU_512;
+ else
+ return IB_MTU_256;
+}
+
enum ib_port_state {
IB_PORT_NOP = 0,
IB_PORT_DOWN = 1,
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 96dd0b3f70d7..da5033dd8cbc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
/**
* fc_set_wwpn() - Set the World Wide Port Name of a local port
* @lport: The local port whose WWPN is to be set
- * @wwnn: The new WWPN
+ * @wwpn: The new WWPN
*/
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
{
- lport->wwpn = wwnn;
+ lport->wwpn = wwpn;
}
/**
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 6902c2a8bd23..4b6b489a8d7c 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -55,17 +55,17 @@ struct mcip_cmd {
struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad3:8,
- idu:1, llm:1, num_cores:6,
- iocoh:1, gfrc:1, dbg:1, pad2:1,
- msg:1, sem:1, ipi:1, pad:1,
+ unsigned int pad4:6, pw_dom:1, pad3:1,
+ idu:1, pad2:1, num_cores:6,
+ pad:1, gfrc:1, dbg:1, pw:1,
+ msg:1, sem:1, ipi:1, slv:1,
ver:8;
#else
unsigned int ver:8,
- pad:1, ipi:1, sem:1, msg:1,
- pad2:1, dbg:1, gfrc:1, iocoh:1,
- num_cores:6, llm:1, idu:1,
- pad3:8;
+ slv:1, ipi:1, sem:1, msg:1,
+ pw:1, dbg:1, gfrc:1, pad:1,
+ num_cores:6, pad2:1, idu:1,
+ pad3:1, pw_dom:1, pad4:6;
#endif
};
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 530c57bdefa0..915c4357945c 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
HDMI_AC97,
HDMI_SPDIF,
} fmt;
- int bit_clk_inv:1;
- int frame_clk_inv:1;
- int bit_clk_master:1;
- int frame_clk_master:1;
+ unsigned int bit_clk_inv:1;
+ unsigned int frame_clk_inv:1;
+ unsigned int bit_clk_master:1;
+ unsigned int frame_clk_master:1;
};
/*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 2b502f6cc6d0..b86168a21d56 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -813,6 +813,7 @@ struct snd_soc_component {
unsigned int suspended:1; /* is in suspend PM state */
struct list_head list;
+ struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
struct snd_soc_dai_driver *dai_drv;
@@ -1152,6 +1153,7 @@ struct snd_soc_card {
*/
struct snd_soc_aux_dev *aux_dev;
int num_aux_devs;
+ struct list_head aux_comp_list;
const struct snd_kcontrol_new *controls;
int num_controls;
@@ -1547,6 +1549,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->widgets);
INIT_LIST_HEAD(&card->paths);
INIT_LIST_HEAD(&card->dapm_list);
+ INIT_LIST_HEAD(&card->aux_comp_list);
INIT_LIST_HEAD(&card->component_dev_list);
}
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 29e6858bb164..43edf82e54ff 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -174,6 +174,10 @@ enum tcm_sense_reason_table {
TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
+ TCM_TOO_MANY_TARGET_DESCS = R(0x19),
+ TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a),
+ TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
+ TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
#undef R
};
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index c14bed4ab097..88d18a8ceb59 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
- "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+ TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+ "disk_i_size=%llu last_trans=%llu logged_trans=%llu",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->generation,
(unsigned long)__entry->ino,
@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
TRACE_EVENT_CONDITION(btrfs_get_extent,
- TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+ TP_PROTO(struct btrfs_root *root, struct inode *inode,
+ struct extent_map *map),
- TP_ARGS(root, map),
+ TP_ARGS(root, inode, map),
TP_CONDITION(map),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
+ __field( u64, ino )
__field( u64, start )
__field( u64, len )
__field( u64, orig_start )
@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
- __entry->start = map->start;
+ __entry->ino = btrfs_ino(inode);
+ __entry->start = map->start;
__entry->len = map->len;
__entry->orig_start = map->orig_start;
__entry->block_start = map->block_start;
@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->compress_type = map->compress_type;
),
- TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
- "orig_start = %llu, block_start = %llu(%s), "
- "block_len = %llu, flags = %s, refs = %u, "
- "compress_type = %u",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
+ "orig_start=%llu block_start=%llu(%s) "
+ "block_len=%llu flags=%s refs=%u "
+ "compress_type=%u",
show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->ino,
(unsigned long long)__entry->start,
(unsigned long long)__entry->len,
(unsigned long long)__entry->orig_start,
@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__field( int, compress_type )
__field( int, refs )
__field( u64, root_objectid )
+ __field( u64, truncated_len )
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->refs = atomic_read(&ordered->refs);
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
+ __entry->truncated_len = ordered->truncated_len;
),
- TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
- "start = %llu, len = %llu, disk_len = %llu, "
- "bytes_left = %llu, flags = %s, compress_type = %d, "
- "refs = %d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
+ "start=%llu len=%llu disk_len=%llu "
+ "truncated_len=%llu "
+ "bytes_left=%llu flags=%s compress_type=%d "
+ "refs=%d",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->ino,
(unsigned long long)__entry->file_offset,
(unsigned long long)__entry->start,
(unsigned long long)__entry->len,
(unsigned long long)__entry->disk_len,
+ (unsigned long long)__entry->truncated_len,
(unsigned long long)__entry->bytes_left,
show_ordered_flags(__entry->flags),
__entry->compress_type, __entry->refs)
@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
- "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
- "range_end = %llu, for_kupdate = %d, "
- "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+ TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+ "nr_to_write=%ld pages_skipped=%ld range_start=%llu "
+ "range_end=%llu for_kupdate=%d "
+ "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
BTRFS_I(page->mapping->host)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
- "end = %llu, uptodate = %d",
+ TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
+ "end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->index,
(unsigned long long)__entry->start,
@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+ TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->parent,
__entry->datasync)
@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
__entry->create = create;
),
- TP_printk("%pU: block_group offset = %llu, size = %llu, "
- "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
- "create = %d", __entry->fsid,
+ TP_printk("%pU: block_group offset=%llu size=%llu "
+ "flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
+ "create=%d", __entry->fsid,
(unsigned long long)__entry->offset,
(unsigned long long)__entry->size,
(unsigned long long)__entry->flags,
@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
__entry->seq = ref->seq;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
- "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
- "type = %s, seq = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+ "parent=%llu(%s) ref_root=%llu(%s) level=%d "
+ "type=%s seq=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
__entry->seq = ref->seq;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
- "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
- "offset = %llu, type = %s, seq = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+ "parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
+ "offset=%llu type=%s seq=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
__entry->is_data = head_ref->is_data;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->root_objectid = fs_info->chunk_root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
- "num_stripes = %d, sub_stripes = %d, type = %s",
+ TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
+ "num_stripes=%d sub_stripes=%d type=%s",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->offset,
(unsigned long long)__entry->size,
@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
__entry->cow_level = btrfs_header_level(cow);
),
- TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
- "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+ TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
+ "(orig_level=%d) cow_buf=%llu (cow_level=%d)",
show_root_type(__entry->root_objectid),
__entry->refs,
(unsigned long long)__entry->buf_start,
@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
__assign_str(reason, reason)
),
- TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
+ TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
__entry->fsid, __get_str(reason), __entry->flush,
show_flush_action(__entry->flush),
(unsigned long long)__entry->flags,
@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
__entry->ret = ret;
),
- TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
- "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
+ TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
+ "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
show_flush_state(__entry->state),
(unsigned long long)__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
__entry->len = len;
),
- TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
+ TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
__entry->data = data;
),
- TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+ TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
__entry->len = len;
),
- TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
- "start = %Lu, len = %Lu",
+ TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
+ "start=%Lu len=%Lu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
__entry->min_bytes = min_bytes;
),
- TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
- " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+ TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
+ "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS), __entry->start,
@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
__entry->bg_objectid = block_group->key.objectid;
),
- TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
+ TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
);
TRACE_EVENT(btrfs_setup_cluster,
@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
__entry->bitmap = bitmap;
),
- TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
- "size = %Lu, max_size = %Lu, bitmap = %d",
+ TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
+ "size=%Lu max_size=%Lu bitmap=%d",
__entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
__entry->ip = IP
),
- TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
+ TP_printk("state=%p mask=%s caller=%pS", __entry->state,
show_gfp_flags(__entry->mask), (void *)__entry->ip)
);
@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
__entry->ip = IP
),
- TP_printk(" state=%p; caller = %pS", __entry->state,
+ TP_printk("state=%p caller=%pS", __entry->state,
(void *)__entry->ip)
);
@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
- " ordered_free=%p",
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
+ "ordered_free=%p",
__entry->work, __entry->normal_work, __entry->wq,
__entry->func, __entry->ordered_func, __entry->ordered_free)
);
-/* For situiations that the work is freed */
+/*
+ * For situations when the work is freed, we pass fs_info and a tag that
+ * matches the address of the work structure so it can be paired with the
+ * scheduling event.
+ */
DECLARE_EVENT_CLASS(btrfs__work__done,
- TP_PROTO(struct btrfs_work *work),
+ TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
- TP_ARGS(work),
+ TP_ARGS(fs_info, wtag),
TP_STRUCT__entry_btrfs(
- __field( void *, work )
+ __field( void *, wtag )
),
- TP_fast_assign_btrfs(btrfs_work_owner(work),
- __entry->work = work;
+ TP_fast_assign_btrfs(fs_info,
+ __entry->wtag = wtag;
),
- TP_printk_btrfs("work->%p", __entry->work)
+ TP_printk_btrfs("work->%p", __entry->wtag)
);
DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
- TP_PROTO(struct btrfs_work *work),
+ TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
- TP_ARGS(work)
+ TP_ARGS(fs_info, wtag)
);
DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
__entry->high = high;
),
- TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
+ TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
__print_flags(__entry->high, "",
{(WQ_HIGHPRI), "-high"}),
__entry->wq)
@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
__entry->free_reserved = free_reserved;
),
- TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
+ TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
__entry->rootid, __entry->ino, __entry->free_reserved)
);
@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
__entry->op = op;
),
- TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+ TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
__entry->rootid, __entry->ino, __entry->start, __entry->len,
__entry->reserved,
__print_flags((unsigned long)__entry->op, "",
@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
__entry->reserved = reserved;
),
- TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
+ TP_printk_btrfs("root=%llu reserved=%llu op=free",
__entry->ref_root, __entry->reserved)
);
@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
__entry->num_bytes = rec->num_bytes;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes)
);
@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
__entry->nr_new_roots = nr_new_roots;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
- "nr_new_roots = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
+ "nr_new_roots=%llu",
__entry->bytenr,
__entry->num_bytes,
__entry->nr_old_roots,
@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_new_count = cur_new_count;
),
- TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+ TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
__entry->qgid,
__entry->cur_old_count,
__entry->cur_new_count)
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 9e687ca9a307..15bf875d0e4a 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -47,8 +47,7 @@
{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
- {(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \
+ {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index 7ea4c5e7c448..288c0c54a2b4 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
TP_PROTO(struct device *dev,
dma_addr_t dev_addr,
size_t size,
- int swiotlb_force),
+ enum swiotlb_force swiotlb_force),
TP_ARGS(dev, dev_addr, size, swiotlb_force),
TP_STRUCT__entry(
- __string( dev_name, dev_name(dev) )
- __field( u64, dma_mask )
- __field( dma_addr_t, dev_addr )
- __field( size_t, size )
- __field( int, swiotlb_force )
+ __string( dev_name, dev_name(dev) )
+ __field( u64, dma_mask )
+ __field( dma_addr_t, dev_addr )
+ __field( size_t, size )
+ __field( enum swiotlb_force, swiotlb_force )
),
TP_fast_assign(
@@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced,
__entry->dma_mask,
(unsigned long long)__entry->dev_addr,
__entry->size,
- __entry->swiotlb_force ? "swiotlb_force" : "" )
+ __print_symbolic(__entry->swiotlb_force,
+ { SWIOTLB_NORMAL, "NORMAL" },
+ { SWIOTLB_FORCE, "FORCE" },
+ { SWIOTLB_NO_FORCE, "NO_FORCE" }))
);
#endif /* _TRACE_SWIOTLB_H */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index a8b93e685239..f330ba4547cf 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -414,6 +414,7 @@ header-y += telephony.h
header-y += termios.h
header-y += thermal.h
header-y += time.h
+header-y += timerfd.h
header-y += times.h
header-y += timex.h
header-y += tiocl.h
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 3cbc327801d6..c451eec42a83 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
__u8 audio_out_compensated,
__u8 audio_out_delay)
{
- msg->len = 7;
+ msg->len = 6;
msg->msg[0] |= 0xf; /* broadcast */
msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
msg->msg[4] = video_latency;
msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
- msg->msg[6] = audio_out_delay;
+ if (audio_out_compensated == 3)
+ msg->msg[msg->len++] = audio_out_delay;
}
static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
*video_latency = msg->msg[4];
*low_latency_mode = (msg->msg[5] >> 2) & 1;
*audio_out_compensated = msg->msg[5] & 3;
- *audio_out_delay = msg->msg[6];
+ if (*audio_out_compensated == 3 && msg->len >= 7)
+ *audio_out_delay = msg->msg[6];
+ else
+ *audio_out_delay = 0;
}
static inline void cec_msg_request_current_latency(struct cec_msg *msg,
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e02387d..d0b5fa91ff54 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
#define NF_LOG_MASK 0x2f
+#define NF_LOG_PREFIXLEN 128
+
#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 881d49e94569..e3f27e09eb2b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
/**
* enum nft_rule_compat_attributes - nf_tables rule compat attributes
*
- * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
* @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
*/
enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
* enum nft_byteorder_ops - nf_tables byteorder operators
*
* @NFT_BYTEORDER_NTOH: network to host operator
- * @NFT_BYTEORDER_HTON: host to network opertaor
+ * @NFT_BYTEORDER_HTON: host to network operator
*/
enum nft_byteorder_ops {
NFT_BYTEORDER_NTOH,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6b76e3b0c18e..bea982af9cfb 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1772,7 +1772,9 @@ enum nl80211_commands {
*
* @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
* Notification Element based on association request when used with
- * %NL80211_CMD_NEW_STATION; u8 attribute.
+ * %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
+ * %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
+ * u8 attribute.
*
* @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
* %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index cb4bcdc58543..a4dcd88ec271 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -397,7 +397,7 @@ enum {
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
- TCA_BPF_DIGEST,
+ TCA_BPF_TAG,
__TCA_BPF_MAX,
};
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index a6b88a6f7f71..975b50dc8d1d 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -27,7 +27,7 @@ enum {
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
- TCA_ACT_BPF_DIGEST,
+ TCA_ACT_BPF_TAG,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/timerfd.h b/include/uapi/linux/timerfd.h
new file mode 100644
index 000000000000..6fcfaa8da173
--- /dev/null
+++ b/include/uapi/linux/timerfd.h
@@ -0,0 +1,36 @@
+/*
+ * include/linux/timerfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _UAPI_LINUX_TIMERFD_H
+#define _UAPI_LINUX_TIMERFD_H
+
+#include <linux/types.h>
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ *
+ * Also make sure to update the masks in include/linux/timerfd.h
+ * when adding new flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_IOC_SET_TICKS _IOW('T', 0, __u64)
+
+#endif /* _UAPI_LINUX_TIMERFD_H */
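With the flags now exported through UAPI, a userspace consumer can use them directly; a minimal sketch (demo_make_timerfd is a made-up name):

#include <sys/timerfd.h>
#include <time.h>

static int demo_make_timerfd(void)
{
	/* Non-blocking, close-on-exec timer using the TFD_* flags above. */
	return timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
}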
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index acc63697a0cc..b2a31a55a612 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -93,6 +93,7 @@ struct usb_ext_prop_desc {
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 |
* | 4 | length | LE32 | length of the whole data chunk |
* | 8 | flags | LE32 | combination of functionfs_flags |
+ * | | eventfd | LE32 | eventfd file descriptor |
* | | fs_count | LE32 | number of full-speed descriptors |
* | | hs_count | LE32 | number of high-speed descriptors |
* | | ss_count | LE32 | number of super-speed descriptors |
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 82bdf5626859..bb68cb1b04ed 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -16,3 +16,4 @@ header-y += nes-abi.h
header-y += ocrdma-abi.h
header-y += hns-abi.h
header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index 48a19bda071b..d24eee12128f 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
#ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H
#include <linux/types.h>
diff --git a/init/Kconfig b/init/Kconfig
index 223b734abccd..e1a937348a3e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1176,6 +1176,10 @@ config CGROUP_DEBUG
Say N.
+config SOCK_CGROUP_DATA
+ bool
+ default n
+
endif # CGROUPS
config CHECKPOINT_RESTORE
diff --git a/ipc/sem.c b/ipc/sem.c
index e08b94851922..3ec5742b5640 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1977,7 +1977,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
}
rcu_read_lock();
- sem_lock(sma, sops, nsops);
+ locknum = sem_lock(sma, sops, nsops);
if (!ipc_valid_object(&sma->sem_perm))
goto out_unlock_free;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8b1dde96a0fa..7b44195da81b 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -231,9 +231,11 @@ static void untag_chunk(struct node *p)
if (size)
new = alloc_chunk(size);
+ mutex_lock(&entry->group->mark_mutex);
spin_lock(&entry->lock);
if (chunk->dead || !entry->inode) {
spin_unlock(&entry->lock);
+ mutex_unlock(&entry->group->mark_mutex);
if (new)
free_chunk(new);
goto out;
@@ -251,6 +253,7 @@ static void untag_chunk(struct node *p)
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
+ mutex_unlock(&entry->group->mark_mutex);
fsnotify_destroy_mark(entry, audit_tree_group);
goto out;
}
@@ -258,8 +261,8 @@ static void untag_chunk(struct node *p)
if (!new)
goto Fallback;
- fsnotify_duplicate_mark(&new->mark, entry);
- if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
+ if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
+ NULL, 1)) {
fsnotify_put_mark(&new->mark);
goto Fallback;
}
@@ -293,6 +296,7 @@ static void untag_chunk(struct node *p)
owner->root = new;
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
+ mutex_unlock(&entry->group->mark_mutex);
fsnotify_destroy_mark(entry, audit_tree_group);
fsnotify_put_mark(&new->mark); /* drop initial reference */
goto out;
@@ -309,6 +313,7 @@ Fallback:
put_tree(owner);
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
+ mutex_unlock(&entry->group->mark_mutex);
out:
fsnotify_put_mark(entry);
spin_lock(&hash_lock);
@@ -386,18 +391,21 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk_entry = &chunk->mark;
+ mutex_lock(&old_entry->group->mark_mutex);
spin_lock(&old_entry->lock);
if (!old_entry->inode) {
/* old_entry is being shot, lets just lie */
spin_unlock(&old_entry->lock);
+ mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_put_mark(old_entry);
free_chunk(chunk);
return -ENOENT;
}
- fsnotify_duplicate_mark(chunk_entry, old_entry);
- if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
+ if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
+ old_entry->inode, NULL, 1)) {
spin_unlock(&old_entry->lock);
+ mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_put_mark(chunk_entry);
fsnotify_put_mark(old_entry);
return -ENOSPC;
@@ -413,6 +421,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk->dead = 1;
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
+ mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_destroy_mark(chunk_entry, audit_tree_group);
@@ -445,6 +454,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&hash_lock);
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
+ mutex_unlock(&old_entry->group->mark_mutex);
fsnotify_destroy_mark(old_entry, audit_tree_group);
fsnotify_put_mark(chunk_entry); /* drop initial reference */
fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index a2ac051c342f..3d55d95dcf49 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
*/
#include <linux/bpf.h>
#include <linux/err.h>
-#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
@@ -56,7 +55,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
attr->value_size == 0 || attr->map_flags)
return ERR_PTR(-EINVAL);
- if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+ if (attr->value_size > KMALLOC_MAX_SIZE)
/* if value_size is bigger, the user space won't be able to
* access the elements.
*/
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
if (array_size >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-ENOMEM);
-
/* allocate all map elements and zero-initialize them */
- array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
- if (!array) {
- array = vzalloc(array_size);
- if (!array)
- return ERR_PTR(-ENOMEM);
- }
+ array = bpf_map_area_alloc(array_size);
+ if (!array)
+ return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
if (array_size >= U32_MAX - PAGE_SIZE ||
elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
- kvfree(array);
+ bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
bpf_array_free_percpu(array);
- kvfree(array);
+ bpf_map_area_free(array);
}
static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
/* make sure it's empty */
for (i = 0; i < array->map.max_entries; i++)
BUG_ON(array->ptrs[i] != NULL);
- kvfree(array);
+
+ bpf_map_area_free(array);
}
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1eb4f1303756..503d4211988a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
-int bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_tag(struct bpf_prog *fp)
{
const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
- u32 raw_size = bpf_prog_digest_scratch_size(fp);
+ u32 raw_size = bpf_prog_tag_scratch_size(fp);
+ u32 digest[SHA_DIGEST_WORDS];
u32 ws[SHA_WORKSPACE_WORDS];
u32 i, bsize, psize, blocks;
struct bpf_insn *dst;
@@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
if (!raw)
return -ENOMEM;
- sha_init(fp->digest);
+ sha_init(digest);
memset(ws, 0, sizeof(ws));
/* We need to take out the map fd for the digest calculation
@@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
*bits = cpu_to_be64((psize - 1) << 3);
while (blocks--) {
- sha_transform(fp->digest, todo, ws);
+ sha_transform(digest, todo, ws);
todo += SHA_MESSAGE_BYTES;
}
- result = (__force __be32 *)fp->digest;
+ result = (__force __be32 *)digest;
for (i = 0; i < SHA_DIGEST_WORDS; i++)
- result[i] = cpu_to_be32(fp->digest[i]);
+ result[i] = cpu_to_be32(digest[i]);
+ memcpy(fp->tag, result, sizeof(fp->tag));
vfree(raw);
return 0;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 34debc1a9641..a753bbe7df0a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
-#include <linux/vmalloc.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
free_percpu(pptr);
}
free_elems:
- vfree(htab->elems);
+ bpf_map_area_free(htab->elems);
}
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
{
int err = -ENOMEM, i;
- htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+ htab->elems = bpf_map_area_alloc(htab->elem_size *
+ htab->map.max_entries);
if (!htab->elems)
return -ENOMEM;
@@ -274,7 +274,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
*/
goto free_htab;
- if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+ if (htab->map.value_size >= KMALLOC_MAX_SIZE -
MAX_BPF_STACK - sizeof(struct htab_elem))
/* if value_size is bigger, the user space won't be able to
* access the elements via bpf syscall. This check also makes
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_htab;
err = -ENOMEM;
- htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
- GFP_USER | __GFP_NOWARN);
-
- if (!htab->buckets) {
- htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
- if (!htab->buckets)
- goto free_htab;
- }
+ htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+ sizeof(struct bucket));
+ if (!htab->buckets)
+ goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
free_extra_elems:
free_percpu(htab->extra_elems);
free_buckets:
- kvfree(htab->buckets);
+ bpf_map_area_free(htab->buckets);
free_htab:
kfree(htab);
return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
prealloc_destroy(htab);
free_percpu(htab->extra_elems);
- kvfree(htab->buckets);
+ bpf_map_area_free(htab->buckets);
kfree(htab);
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16d12b7..be8519148c25 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
-#include <linux/vmalloc.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
int err;
- smap->elems = vzalloc(elem_size * smap->map.max_entries);
+ smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
if (!smap->elems)
return -ENOMEM;
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
return 0;
free_elems:
- vfree(smap->elems);
+ bpf_map_area_free(smap->elems);
return err;
}
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-E2BIG);
- smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
- if (!smap) {
- smap = vzalloc(cost);
- if (!smap)
- return ERR_PTR(-ENOMEM);
- }
+ smap = bpf_map_area_alloc(cost);
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
err = -E2BIG;
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
put_buffers:
put_callchain_buffers();
free_smap:
- kvfree(smap);
+ bpf_map_area_free(smap);
return ERR_PTR(err);
}
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
/* wait for bpf programs to complete before freeing stack map */
synchronize_rcu();
- vfree(smap->elems);
+ bpf_map_area_free(smap->elems);
pcpu_freelist_destroy(&smap->freelist);
- kvfree(smap);
+ bpf_map_area_free(smap);
put_callchain_buffers();
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e89acea22ecf..19b6129eab23 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -12,6 +12,8 @@
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
list_add(&tl->list_node, &bpf_map_types);
}
+void *bpf_map_area_alloc(size_t size)
+{
+ /* We definitely need __GFP_NORETRY, so OOM killer doesn't
+ * trigger under memory pressure as we really just want to
+ * fail instead.
+ */
+ const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+ void *area;
+
+ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+ area = kmalloc(size, GFP_USER | flags);
+ if (area != NULL)
+ return area;
+ }
+
+ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+ PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+ kvfree(area);
+}
+
int bpf_map_precharge_memlock(u32 pages)
{
struct user_struct *user = get_current_user();
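The two helpers above centralize the kmalloc-then-vmalloc fallback that the arraymap, hashtab and stackmap hunks earlier in this diff convert to. A minimal sketch of a map implementation built on them follows; struct my_map and its callers are hypothetical and only illustrate the calling convention, they are not part of this patch.

#include <linux/bpf.h>
#include <linux/err.h>

struct my_map {
	struct bpf_map map;
	char data[];			/* element storage */
};

static struct bpf_map *my_map_alloc(union bpf_attr *attr)
{
	u64 size = (u64)attr->max_entries * attr->value_size;
	struct my_map *m;

	/* Real callers bound-check size first (see the U32_MAX checks in
	 * array_map_alloc()/stack_map_alloc() above); omitted here.
	 */
	m = bpf_map_area_alloc(sizeof(*m) + size);
	if (!m)
		return ERR_PTR(-ENOMEM);

	m->map.map_type    = attr->map_type;
	m->map.key_size    = attr->key_size;
	m->map.value_size  = attr->value_size;
	m->map.max_entries = attr->max_entries;
	return &m->map;
}

static void my_map_free(struct bpf_map *map)
{
	struct my_map *m = container_of(map, struct my_map, map);

	/* Works for both the kmalloc()ed and the vmalloc()ed case. */
	bpf_map_area_free(m);
}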
@@ -688,17 +714,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_prog *prog = filp->private_data;
- char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
+ char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
- bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
+ bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
seq_printf(m,
"prog_type:\t%u\n"
"prog_jited:\t%u\n"
- "prog_digest:\t%s\n"
+ "prog_tag:\t%s\n"
"memlock:\t%llu\n",
prog->type,
prog->jited,
- prog_digest,
+ prog_tag,
prog->pages * 1ULL << PAGE_SHIFT);
}
#endif
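The renamed fdinfo line gives user space a stable way to read the tag back. A small user-space sketch (hypothetical helper, error handling trimmed) that prints the prog_tag line for an already-loaded program fd:

#include <stdio.h>
#include <string.h>

/* Print the "prog_tag:\t<hex>" line exposed via fdinfo for prog_fd. */
static void print_prog_tag(int prog_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", prog_fd);
	f = fopen(path, "r");
	if (!f)
		return;

	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "prog_tag:", 9)) {
			fputs(line, stdout);
			break;
		}
	}
	fclose(f);
}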
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 83ed2f8f6f22..cdc43b899f28 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2936,7 +2936,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
int insn_cnt = env->prog->len;
int i, j, err;
- err = bpf_prog_calc_digest(env->prog);
+ err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
diff --git a/kernel/capability.c b/kernel/capability.c
index a98e814f216f..f97fe77ceb88 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -318,6 +318,7 @@ bool has_capability(struct task_struct *t, int cap)
{
return has_ns_capability(t, &init_user_ns, cap);
}
+EXPORT_SYMBOL(has_capability);
/**
* has_ns_capability_noaudit - Does a task have a capability (unaudited)
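With the export added above, modules can ask whether another task holds a capability in the initial user namespace. A trivial hedged sketch (the surrounding driver logic is hypothetical):

#include <linux/capability.h>
#include <linux/sched.h>

/* Returns true if @task holds CAP_SYS_ADMIN in the init user namespace. */
static bool my_task_is_admin(struct task_struct *task)
{
	return has_capability(task, CAP_SYS_ADMIN);
}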
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 042fd7e8e030..0a5f630f5c54 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
- bool hasdied = false;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
cpuhp_kick_ap_work(cpu);
}
- hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
cpu_hotplug_done();
return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
- enum cpuhp_state i;
+ enum cpuhp_state i, end;
+ struct cpuhp_step *step;
- for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
- if (!cpuhp_ap_states[i].name)
+ switch (state) {
+ case CPUHP_AP_ONLINE_DYN:
+ step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
+ end = CPUHP_AP_ONLINE_DYN_END;
+ break;
+ case CPUHP_BP_PREPARE_DYN:
+ step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
+ end = CPUHP_BP_PREPARE_DYN_END;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = state; i <= end; i++, step++) {
+ if (!step->name)
return i;
}
WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
mutex_lock(&cpuhp_state_mutex);
- if (state == CPUHP_AP_ONLINE_DYN) {
+ if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
goto out;
@@ -1471,6 +1483,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
bool multi_instance)
{
int cpu, ret = 0;
+ bool dynstate;
if (cpuhp_cb_check(state) || !name)
return -EINVAL;
@@ -1480,6 +1493,12 @@ int __cpuhp_setup_state(enum cpuhp_state state,
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
+ dynstate = state == CPUHP_AP_ONLINE_DYN;
+ if (ret > 0 && dynstate) {
+ state = ret;
+ ret = 0;
+ }
+
if (ret || !invoke || !startup)
goto out;
@@ -1508,7 +1527,7 @@ out:
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success.
*/
- if (!ret && state == CPUHP_AP_ONLINE_DYN)
+ if (!ret && dynstate)
return state;
return ret;
}
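For context, the hunks above generalize dynamic state reservation so that the new CPUHP_BP_PREPARE_DYN range is handled like the existing CPUHP_AP_ONLINE_DYN one. A usage sketch for the online range (driver names are hypothetical; this is not code from the patch):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int my_hp_state;

static int my_cpu_online(unsigned int cpu)
{
	/* per-CPU setup */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* per-CPU teardown */
	return 0;
}

static int __init my_driver_init(void)
{
	int ret;

	/* With CPUHP_AP_ONLINE_DYN a free dynamic state is reserved and
	 * returned on success, so it can later be passed to
	 * cpuhp_remove_state().
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_hp_state = ret;
	return 0;
}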
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ab15509fab8c..110b38a58493 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2249,7 +2249,7 @@ static int __perf_install_in_context(void *info)
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
- bool activate = true;
+ bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
@@ -2257,27 +2257,26 @@ static int __perf_install_in_context(void *info)
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
- /* If we're on the wrong CPU, try again */
- if (task_cpu(ctx->task) != smp_processor_id()) {
- ret = -ESRCH;
- goto unlock;
- }
+ reprogram = (ctx->task == current);
/*
- * If we're on the right CPU, see if the task we target is
- * current, if not we don't have to activate the ctx, a future
- * context switch will do that for us.
+ * If the task is running, it must be running on this CPU,
+ * otherwise we cannot reprogram things.
+ *
+ * If it's not running, we don't care; ctx->lock will
+ * serialize against it becoming runnable.
*/
- if (ctx->task != current)
- activate = false;
- else
- WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+ if (task_curr(ctx->task) && !reprogram) {
+ ret = -ESRCH;
+ goto unlock;
+ }
+ WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
- if (activate) {
+ if (reprogram) {
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx);
@@ -2328,13 +2327,36 @@ perf_install_in_context(struct perf_event_context *ctx,
/*
* Installing events is tricky because we cannot rely on ctx->is_active
* to be set in case this is the nr_events 0 -> 1 transition.
+ *
+ * Instead we use task_curr(), which tells us if the task is running.
+ * However, since we use task_curr() outside of rq::lock, we can race
+ * against the actual state. This means the result can be wrong.
+ *
+ * If we get a false positive, we retry, this is harmless.
+ *
+ * If we get a false negative, things are complicated. If we are after
+ * perf_event_context_sched_in(), ctx::lock will serialize us, and the
+ * value must be correct. If we're before, it doesn't matter since
+ * perf_event_context_sched_in() will program the counter.
+ *
+ * However, this hinges on the remote context switch having observed
+ * our task->perf_event_ctxp[] store, such that it will in fact take
+ * ctx::lock in perf_event_context_sched_in().
+ *
+ * We do this by task_function_call(); if the IPI fails to hit the task,
+ * we know any future context switch of that task must see the
+ * perf_event_ctxp[] store.
*/
-again:
+
/*
- * Cannot use task_function_call() because we need to run on the task's
- * CPU regardless of whether its current or not.
+ * This smp_mb() orders the task->perf_event_ctxp[] store with the
+ * task_cpu() load, such that if the IPI then does not find the task
+ * running, a future context switch of that task must observe the
+ * store.
*/
- if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+ smp_mb();
+again:
+ if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
@@ -2348,12 +2370,16 @@ again:
raw_spin_unlock_irq(&ctx->lock);
return;
}
- raw_spin_unlock_irq(&ctx->lock);
/*
- * Since !ctx->is_active doesn't mean anything, we must IPI
- * unconditionally.
+ * If the task is not running, ctx->lock will avoid it becoming so,
+ * thus we can safely install the event.
*/
- goto again;
+ if (task_curr(task)) {
+ raw_spin_unlock_irq(&ctx->lock);
+ goto again;
+ }
+ add_event_to_ctx(event, ctx);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -7034,25 +7060,12 @@ static void perf_log_itrace_start(struct perf_event *event)
perf_output_end(&handle);
}
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
- int throttle, struct perf_sample_data *data,
- struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
{
- int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
- u64 seq;
int ret = 0;
-
- /*
- * Non-sampling counters might still use the PMI to fold short
- * hardware counters, ignore those.
- */
- if (unlikely(!is_sampling_event(event)))
- return 0;
+ u64 seq;
seq = __this_cpu_read(perf_throttled_seq);
if (seq != hwc->interrupts_seq) {
@@ -7080,6 +7093,34 @@ static int __perf_event_overflow(struct perf_event *event,
perf_adjust_period(event, delta, hwc->last_period, true);
}
+ return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+ return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+ int throttle, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int events = atomic_read(&event->event_limit);
+ int ret = 0;
+
+ /*
+ * Non-sampling counters might still use the PMI to fold short
+ * hardware counters, ignore those.
+ */
+ if (unlikely(!is_sampling_event(event)))
+ return 0;
+
+ ret = __perf_event_account_interrupt(event, throttle);
+
/*
* XXX event_limit might not quite work as expected on inherited
* events
@@ -9503,6 +9544,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
return 0;
}
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+ struct perf_event_context *ctx)
+{
+ struct perf_event_context *gctx;
+
+again:
+ rcu_read_lock();
+ gctx = READ_ONCE(group_leader->ctx);
+ if (!atomic_inc_not_zero(&gctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ if (group_leader->ctx != gctx) {
+ mutex_unlock(&ctx->mutex);
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ goto again;
+ }
+
+ return gctx;
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -9746,12 +9818,31 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (move_group) {
- gctx = group_leader->ctx;
- mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
if (gctx->task == TASK_TOMBSTONE) {
err = -ESRCH;
goto err_locked;
}
+
+ /*
+ * Check if we raced against another sys_perf_event_open() call
+ * moving the software group underneath us.
+ */
+ if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
+ /*
+ * If someone moved the group out from under us, check
+ * if this new event wound up on the same ctx, if so
+ * it's the regular !move_group case; otherwise fail.
+ */
+ if (gctx != ctx) {
+ err = -EINVAL;
+ goto err_locked;
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
+ }
+ }
} else {
mutex_lock(&ctx->mutex);
}
@@ -9853,7 +9944,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_unpin_context(ctx);
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
if (task) {
@@ -9879,7 +9970,7 @@ SYSCALL_DEFINE5(perf_event_open,
err_locked:
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
/* err_file: */
fput(event_file);
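Among the kernel/events/core.c changes above, the newly split-out perf_event_account_interrupt() lets PMU code that swallows an overflow (for instance, because a sample turns out to be unusable) still feed the throttling and period-adjustment logic. A hedged sketch of such a caller; my_pmu_handle_sample() and my_sample_is_usable() are hypothetical:

#include <linux/perf_event.h>

static void my_pmu_handle_sample(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	if (!my_sample_is_usable(data)) {	/* hypothetical filter */
		/* Still account the interrupt for throttling, even though
		 * no sample is delivered.
		 */
		perf_event_account_interrupt(event);
		return;
	}

	perf_event_overflow(event, data, regs);
}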
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 93ad6c1fb9b6..a9b8cf500591 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+ flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
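The new static_key_deferred_flush() gives users of rate-limited static keys a way to wait until the deferred decrement has actually run, typically before the key's storage or the owning module goes away. A hedged teardown sketch (module structure hypothetical):

#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred my_key;

static void my_feature_teardown(void)
{
	/* With jump_label_rate_limit() in effect, the decrement is queued
	 * on delayed work rather than applied immediately.
	 */
	static_key_slow_dec_deferred(&my_key);

	/* Make sure that delayed work has completed before my_key (or the
	 * module text) can be freed.
	 */
	static_key_deferred_flush(&my_key);
}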
diff --git a/kernel/memremap.c b/kernel/memremap.c
index b501e390bb34..9ecedc28b928 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -246,7 +246,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
/* pages are dead and unused, undo the arch mapping */
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(resource_size(res), SECTION_SIZE);
+ mem_hotplug_begin();
arch_remove_memory(align_start, align_size);
+ mem_hotplug_done();
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
pgmap_radix_release(res);
dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
@@ -358,7 +360,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (error)
goto err_pfn_remap;
+ mem_hotplug_begin();
error = arch_add_memory(nid, align_start, align_size, true);
+ mem_hotplug_done();
if (error)
goto err_add_memory;
diff --git a/kernel/module.c b/kernel/module.c
index 5088784c0cf9..38d4270925d4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
if (taint_flags[i].module && test_bit(i, &mod->taints))
- buf[l++] = taint_flags[i].true;
+ buf[l++] = taint_flags[i].c_true;
}
return l;
diff --git a/kernel/panic.c b/kernel/panic.c
index c51edaa04fce..08aa88dde7de 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked.
*/
- pr_emerg("Rebooting in %d seconds..", panic_timeout);
+ pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
const struct taint_flag *t = &taint_flags[i];
*s++ = test_bit(i, &tainted_mask) ?
- t->true : t->false;
+ t->c_true : t->c_false;
}
*s = 0;
} else
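For reference, the structure whose members are renamed above carries one printable character per taint state plus a per-module flag; a plausible shape after the rename, stated as an assumption rather than quoted from this diff:

struct taint_flag {
	char c_true;	/* character printed when the taint bit is set */
	char c_false;	/* character printed when it is clear */
	bool module;	/* whether to show the flag per module as well */
};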
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index df9e8e9e0be7..eef2ce968636 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -151,8 +151,12 @@ out:
static void delayed_free_pidns(struct rcu_head *p)
{
- kmem_cache_free(pid_ns_cachep,
- container_of(p, struct pid_namespace, rcu));
+ struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);
+
+ dec_pid_namespaces(ns->ucounts);
+ put_user_ns(ns->user_ns);
+
+ kmem_cache_free(pid_ns_cachep, ns);
}
static void destroy_pid_namespace(struct pid_namespace *ns)
@@ -162,8 +166,6 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
ns_free_inum(&ns->ns);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
- dec_pid_namespaces(ns->ucounts);
- put_user_ns(ns->user_ns);
call_rcu(&ns->rcu, delayed_free_pidns);
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f67ceb7768b8..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
const char *mem_sleep_states[PM_SUSPEND_MAX];
suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
unsigned int pm_suspend_global_flags;
EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
}
if (valid_state(PM_SUSPEND_MEM)) {
mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
- if (mem_sleep_default >= PM_SUSPEND_MEM)
+ if (mem_sleep_default == PM_SUSPEND_MEM)
mem_sleep_current = PM_SUSPEND_MEM;
}
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 80adef7d4c3d..0d6ff3e471be 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
#define TPS(x) tracepoint_string(x)
void rcu_early_boot_tests(void);
+void rcu_test_sync_prims(void);
/*
* This function really isn't for public consumption, but RCU is special in
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 1898559e6b60..b23a4d076f3d 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
* benefits of doing might_sleep() to reduce latency.)
*
* Cool, huh? (Due to Josh Triplett.)
- *
- * But we want to make this a static inline later. The cond_resched()
- * currently makes this problematic.
*/
void synchronize_sched(void)
{
@@ -195,7 +192,6 @@ void synchronize_sched(void)
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_sched() in RCU read-side critical section");
- cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 196f0302e2f4..c64b827ecbca 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
/*
* During boot, we forgive RCU lockdep issues. After this function is
- * invoked, we start taking RCU lockdep issues seriously.
+ * invoked, we start taking RCU lockdep issues seriously. Note that unlike
+ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
+ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
+ * The reason for this is that Tiny RCU does not need kthreads, so it does
+ * not have to care about the fact that the scheduler is half-initialized
+ * at a certain phase of the boot process.
*/
void __init rcu_scheduler_starting(void)
{
WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = 1;
+ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96c52e43f7ca..cb4e2056ccf3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
int sysctl_panic_on_rcu_stall __read_mostly;
/*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned. So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
+ * The rcu_scheduler_active variable is initialized to the value
+ * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
+ * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
+ * RCU can assume that there is but one task, allowing RCU to (for example)
* optimize synchronize_rcu() to a simple barrier(). When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods. This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
+ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
+ * to detect real grace periods. This variable is also used to suppress
+ * boot-time false positives from lockdep-RCU error checking. Finally, it
+ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
+ * is fully initialized, including all of its kthreads having been spawned.
*/
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
early_initcall(rcu_spawn_gp_kthread);
/*
- * This function is invoked towards the end of the scheduler's initialization
- * process. Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system). After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections. This function also enables RCU lockdep checking.
+ * This function is invoked towards the end of the scheduler's
+ * initialization process. Before this is called, the idle task might
+ * contain synchronous grace-period primitives (during which time, this idle
+ * task is booting the system, and such primitives are no-ops). After this
+ * function is called, any synchronous grace-period primitives are run as
+ * expedited, with the requesting task driving the grace period forward.
+ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
+ * runtime RCU functionality.
*/
void rcu_scheduler_starting(void)
{
WARN_ON(num_online_cpus() != 1);
WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = 1;
+ rcu_test_sync_prims();
+ rcu_scheduler_active = RCU_SCHEDULER_INIT;
+ rcu_test_sync_prims();
}
/*
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d3053e99fdb6..e59e1849b89a 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -532,18 +532,28 @@ struct rcu_exp_work {
};
/*
+ * Common code to drive an expedited grace period forward, used by
+ * workqueues and mid-boot-time tasks.
+ */
+static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
+ smp_call_func_t func, unsigned long s)
+{
+ /* Initialize the rcu_node tree in preparation for the wait. */
+ sync_rcu_exp_select_cpus(rsp, func);
+
+ /* Wait and clean up, including waking everyone. */
+ rcu_exp_wait_wake(rsp, s);
+}
+
+/*
* Work-queue handler to drive an expedited grace period forward.
*/
static void wait_rcu_exp_gp(struct work_struct *wp)
{
struct rcu_exp_work *rewp;
- /* Initialize the rcu_node tree in preparation for the wait. */
rewp = container_of(wp, struct rcu_exp_work, rew_work);
- sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
-
- /* Wait and clean up, including waking everyone. */
- rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+ rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}
/*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
if (exp_funnel_lock(rsp, s))
return; /* Someone else did our work for us. */
- /* Marshall arguments and schedule the expedited grace period. */
- rew.rew_func = func;
- rew.rew_rsp = rsp;
- rew.rew_s = s;
- INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
- schedule_work(&rew.rew_work);
+ /* Ensure that load happens before action based on it. */
+ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+ /* Direct call during scheduler init and early_initcalls(). */
+ rcu_exp_sel_wait_wake(rsp, func, s);
+ } else {
+ /* Marshall arguments & schedule the expedited grace period. */
+ rew.rew_func = func;
+ rew.rew_rsp = rsp;
+ rew.rew_s = s;
+ INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+ schedule_work(&rew.rew_work);
+ }
/* Wait for expedited grace period to complete. */
rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
{
struct rcu_state *rsp = rcu_state_p;
+ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+ return;
_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Switch to run-time mode once Tree RCU has fully initialized.
+ */
+static int __init rcu_exp_runtime_mode(void)
+{
+ rcu_test_sync_prims();
+ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+ rcu_test_sync_prims();
+ return 0;
+}
+core_initcall(rcu_exp_runtime_mode);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 85c5a883c6e3..56583e764ebf 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu() in RCU read-side critical section");
- if (!rcu_scheduler_active)
+ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
if (rcu_gp_is_expedited())
synchronize_rcu_expedited();
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f19271dce0a9..4f6db7e6a117 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
* Should expedited grace-period primitives always fall back to their
* non-expedited counterparts? Intended for use within RCU. Note
* that if the user specifies both rcu_expedited and rcu_normal, then
- * rcu_normal wins.
+ * rcu_normal wins. (Except during the time period during boot from
+ * when the first task is spawned until the rcu_exp_runtime_mode()
+ * core_initcall() is invoked, at which point everything is expedited.)
*/
bool rcu_gp_is_normal(void)
{
- return READ_ONCE(rcu_normal);
+ return READ_ONCE(rcu_normal) &&
+ rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
/*
* Should normal grace-period primitives be expedited? Intended for
* use within RCU. Note that this function takes the rcu_expedited
- * sysfs/boot variable into account as well as the rcu_expedite_gp()
- * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
- * returns false is a -really- bad idea.
+ * sysfs/boot variable and rcu_scheduler_active into account as well
+ * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp()
+ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
*/
bool rcu_gp_is_expedited(void)
{
- return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+ return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
+ rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
int notrace debug_lockdep_rcu_enabled(void)
{
- return rcu_scheduler_active && debug_locks &&
+ return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
void synchronize_rcu_tasks(void)
{
/* Complain if the scheduler has not started. */
- RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+ RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
"synchronize_rcu_tasks called too soon");
/* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
#endif /* #ifdef CONFIG_TASKS_RCU */
+/*
+ * Test each non-SRCU synchronous grace-period wait API. This is
+ * useful just after a change in mode for these primitives, and
+ * during early boot.
+ */
+void rcu_test_sync_prims(void)
+{
+ if (!IS_ENABLED(CONFIG_PROVE_RCU))
+ return;
+ synchronize_rcu();
+ synchronize_rcu_bh();
+ synchronize_sched();
+ synchronize_rcu_expedited();
+ synchronize_rcu_bh_expedited();
+ synchronize_sched_expedited();
+}
+
#ifdef CONFIG_PROVE_RCU
/*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
early_boot_test_call_rcu_bh();
if (rcu_self_test_sched)
early_boot_test_call_rcu_sched();
+ rcu_test_sync_prims();
}
static int rcu_verify_early_boot_tests(void)
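Taken together, the RCU hunks above give rcu_scheduler_active three phases instead of the old 0/1 values. The summary below is a reading of this series, not code taken from it:

/*
 * RCU_SCHEDULER_INACTIVE: before rcu_scheduler_starting(); only one task
 *	exists, so the synchronous grace-period primitives can be no-ops.
 * RCU_SCHEDULER_INIT: from rcu_scheduler_starting() until the
 *	rcu_exp_runtime_mode() core_initcall(); synchronous waits run
 *	expedited, with the requesting task driving the grace period.
 * RCU_SCHEDULER_RUNNING: after rcu_exp_runtime_mode(); full runtime RCU,
 *	with kthreads and workqueues available (Tiny RCU skips the INIT
 *	phase and goes straight to RUNNING).
 */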
diff --git a/kernel/signal.c b/kernel/signal.c
index ff046b73ff2d..3603d93a1968 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
- sig->flags = SIGNAL_STOP_STOPPED;
+ signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
@@ -843,7 +843,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal_to_deliver().
*/
- signal->flags = why | SIGNAL_STOP_CONTINUED;
+ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
signal->group_exit_code = 0;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8dbaec0e4f7f..1aea594a54db 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
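The added conversion line ensures the extra1/extra2 limits, which are stored in kernel units, are compared against the converted value rather than the raw user input. A quick worked example, assuming the proc_doulongvec_ms_jiffies_minmax() caller where convmul = HZ and convdiv = 1000 (an assumption here, not shown in the hunk):

/* User writes 1000 (milliseconds) on a HZ == 250 kernel:
 *	val = convmul * val / convdiv = 250 * 1000 / 1000 = 250 jiffies
 * The min/max limits are jiffies values, so they must be checked against
 * 250, not against the unconverted 1000.
 */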
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2c115fdab397..74e0388cc88d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -767,7 +767,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
tick = expires;
/* Skip reprogram of event if its not changed */
- if (ts->tick_stopped && (expires == dev->next_event))
+ if (ts->tick_stopped && (expires == ts->next_tick))
goto out;
/*
@@ -787,6 +787,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
trace_tick_stop(1, TICK_DEP_MASK_NONE);
}
+ ts->next_tick = tick;
+
/*
* If the expiration time == KTIME_MAX, then we simply stop
* the tick timer.
@@ -802,7 +804,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
else
tick_program_event(tick, 1);
out:
- /* Update the estimated sleep length */
+ /*
+ * Update the estimated sleep length until the next timer
+ * (not only the tick).
+ */
ts->sleep_length = ktime_sub(dev->next_event, now);
return tick;
}
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index bf38226e5c17..075444e3d48e 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -27,6 +27,7 @@ enum tick_nohz_mode {
* timer is modified for nohz sleeps. This is necessary
* to resume the tick timer operation in the timeline
* when the CPU returns from nohz sleep.
+ * @next_tick: Next tick to be fired when in dynticks mode.
* @tick_stopped: Indicator that the idle tick has been stopped
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_calls: Total number of idle calls
@@ -44,6 +45,7 @@ struct tick_sched {
unsigned long check_clocks;
enum tick_nohz_mode nohz_mode;
ktime_t last_tick;
+ ktime_t next_tick;
int inidle;
int tick_stopped;
unsigned long idle_jiffies;
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5dd298a..4bbd38ec3788 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
struct ucounts *ucounts, *new;
- spin_lock(&ucounts_lock);
+ spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
if (!ucounts) {
- spin_unlock(&ucounts_lock);
+ spin_unlock_irq(&ucounts_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
new->uid = uid;
atomic_set(&new->count, 0);
- spin_lock(&ucounts_lock);
+ spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
if (ucounts) {
kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
}
if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
ucounts = NULL;
- spin_unlock(&ucounts_lock);
+ spin_unlock_irq(&ucounts_lock);
return ucounts;
}
static void put_ucounts(struct ucounts *ucounts)
{
+ unsigned long flags;
+
if (atomic_dec_and_test(&ucounts->count)) {
- spin_lock(&ucounts_lock);
+ spin_lock_irqsave(&ucounts_lock, flags);
hlist_del_init(&ucounts->node);
- spin_unlock(&ucounts_lock);
+ spin_unlock_irqrestore(&ucounts_lock, flags);
kfree(ucounts);
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa01cae3..63177be0159e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#define for_each_watchdog_cpu(cpu) \
for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
/*
* The 'watchdog_running' variable is set to 1 when the watchdog threads
* are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return HRTIMER_NORESTART;
+
/* kick the hardlockup detector */
watchdog_interrupt_count();
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
{
int cpu, ret = 0;
+ atomic_set(&watchdog_park_in_progress, 1);
+
for_each_watchdog_cpu(cpu) {
ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
if (ret)
break;
}
+ atomic_set(&watchdog_park_in_progress, 0);
+
return ret;
}
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8aee6b..12b8dd640786 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
/* Ensure the watchdog never gets throttled */
event->hw.interrupts = 0;
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return;
+
if (__this_cpu_read(watchdog_nmi_touch) == true) {
__this_cpu_write(watchdog_nmi_touch, false);
return;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b06848a104e6..eb9e9a7870fa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
+ depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
return err;
}
-EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 25f572303801..e68604ae3ced 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+static inline void pipe_truncate(struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ if (pipe->nrbufs) {
+ size_t off = i->iov_offset;
+ int idx = i->idx;
+ int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+ if (off) {
+ pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+ idx = next_idx(idx, pipe);
+ nrbufs++;
+ }
+ while (pipe->nrbufs > nrbufs) {
+ pipe_buf_release(pipe, &pipe->bufs[idx]);
+ idx = next_idx(idx, pipe);
+ pipe->nrbufs--;
+ }
+ }
+}
+
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- struct pipe_buffer *buf;
- int idx = i->idx;
- size_t off = i->iov_offset, orig_sz;
-
if (unlikely(i->count < size))
size = i->count;
- orig_sz = size;
-
if (size) {
+ struct pipe_buffer *buf;
+ size_t off = i->iov_offset, left = size;
+ int idx = i->idx;
if (off) /* make it relative to the beginning of buffer */
- size += off - pipe->bufs[idx].offset;
+ left += off - pipe->bufs[idx].offset;
while (1) {
buf = &pipe->bufs[idx];
- if (size <= buf->len)
+ if (left <= buf->len)
break;
- size -= buf->len;
+ left -= buf->len;
idx = next_idx(idx, pipe);
}
- buf->len = size;
i->idx = idx;
- off = i->iov_offset = buf->offset + size;
- }
- if (off)
- idx = next_idx(idx, pipe);
- if (pipe->nrbufs) {
- int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- /* [curbuf,unused) is in use. Free [idx,unused) */
- while (idx != unused) {
- pipe_buf_release(pipe, &pipe->bufs[idx]);
- idx = next_idx(idx, pipe);
- pipe->nrbufs--;
- }
+ i->iov_offset = buf->offset + left;
}
- i->count -= orig_sz;
+ i->count -= size;
+ /* ... and discard everything past that point */
+ pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
size_t count)
{
BUG_ON(direction != ITER_PIPE);
+ WARN_ON(pipe->nrbufs == pipe->buffers);
i->type = direction;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f382e07de77..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -640,6 +640,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
update_node(node, private);
}
+ WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node);
}
}
@@ -666,6 +667,7 @@ static void delete_node(struct radix_tree_root *root,
root->rnode = NULL;
}
+ WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node);
node = parent;
@@ -767,6 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
struct radix_tree_node *old = child;
offset = child->offset + 1;
child = child->parent;
+ WARN_ON_ONCE(!list_empty(&old->private_list));
radix_tree_node_free(old);
if (old == entry_to_node(node))
return;
@@ -1824,15 +1827,19 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
* __radix_tree_delete_node - try to free node after clearing a slot
* @root: radix tree root
* @node: node containing @index
+ * @update_node: callback for changing leaf nodes
+ * @private: private data to pass to @update_node
*
* After clearing the slot at @index in @node from radix tree
* rooted at @root, call this function to attempt freeing the
* node and shrinking the tree.
*/
void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node)
+ struct radix_tree_node *node,
+ radix_tree_update_node_t update_node,
+ void *private)
{
- delete_node(root, node, NULL, NULL);
+ delete_node(root, node, update_node, private);
}
/**
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cb1b54ee8527..a8d74a733a38 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -53,7 +53,7 @@
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-int swiotlb_force;
+enum swiotlb_force swiotlb_force;
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -83,6 +83,12 @@ static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
+/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
@@ -106,8 +112,12 @@ setup_io_tlb_npages(char *str)
}
if (*str == ',')
++str;
- if (!strcmp(str, "force"))
- swiotlb_force = 1;
+ if (!strcmp(str, "force")) {
+ swiotlb_force = SWIOTLB_FORCE;
+ } else if (!strcmp(str, "noforce")) {
+ swiotlb_force = SWIOTLB_NO_FORCE;
+ io_tlb_nslabs = 1;
+ }
return 0;
}
@@ -120,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+unsigned int swiotlb_max_segment(void)
+{
+ return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+ if (swiotlb_force == SWIOTLB_FORCE)
+ max_segment = 1;
+ else
+ max_segment = rounddown(val, PAGE_SIZE);
+}
+
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
@@ -201,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
if (verbose)
swiotlb_print_info();
+ swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
return 0;
}
@@ -279,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
if (rc)
free_pages((unsigned long)vstart, order);
+
return rc;
}
@@ -333,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
late_alloc = 1;
+ swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
return 0;
cleanup4:
@@ -347,6 +375,7 @@ cleanup2:
io_tlb_end = 0;
io_tlb_start = 0;
io_tlb_nslabs = 0;
+ max_segment = 0;
return -ENOMEM;
}
@@ -375,6 +404,7 @@ void __init swiotlb_free(void)
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
+ max_segment = 0;
}
int is_swiotlb_buffer(phys_addr_t paddr)
@@ -453,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
+ * For mappings greater than or equal to a page, we limit the stride
+ * (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
+ if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
@@ -543,8 +573,15 @@ static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+ dma_addr_t start_dma_addr;
+ if (swiotlb_force == SWIOTLB_NO_FORCE) {
+ dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
+ &phys);
+ return SWIOTLB_MAP_ERROR;
+ }
+
+ start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
dir, attrs);
}
@@ -721,6 +758,9 @@ static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
int do_panic)
{
+ if (swiotlb_force == SWIOTLB_NO_FORCE)
+ return;
+
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly.
@@ -763,7 +803,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
+ if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
return dev_addr;
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -904,7 +944,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
phys_addr_t paddr = sg_phys(sg);
dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
- if (swiotlb_force ||
+ if (swiotlb_force == SWIOTLB_FORCE ||
!dma_capable(hwdev, dev_addr, sg->length)) {
phys_addr_t map = map_single(hwdev, sg_phys(sg),
sg->length, dir, attrs);
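Besides the new swiotlb=noforce command-line value parsed above, the series exposes swiotlb_max_segment() so drivers can keep contiguous allocations at or below the size that can be mapped without bouncing. A hedged driver-side sketch (not from this patch):

#include <linux/swiotlb.h>
#include <linux/kernel.h>

/* Upper bound a driver might apply to its scatterlist segment size. */
static unsigned int my_drv_max_segment(void)
{
	unsigned int max = swiotlb_max_segment();

	/* A return value of 0 means SWIOTLB is not limiting us; with
	 * SWIOTLB_FORCE it drops to 1 so everything is bounced anyway.
	 */
	return max ? max : UINT_MAX;
}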
diff --git a/mm/filemap.c b/mm/filemap.c
index 82f26cde830c..b772a33ef640 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -138,7 +138,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
/* Wakeup waiters for exceptional entry lock */
dax_wake_mapping_entry_waiter(mapping, page->index, p,
- false);
+ true);
}
}
__radix_tree_replace(&mapping->page_tree, node, slot, page,
@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
+#ifndef clear_bit_unlock_is_negative_byte
+
+/*
+ * PG_waiters is the high bit in the same byte as PG_locked.
+ *
+ * On x86 (and on many other architectures), we can clear PG_locked and
+ * test the sign bit at the same time. But if the architecture does
+ * not support that special operation, we just do this all by hand
+ * instead.
+ *
+ * The read of PG_waiters has to be after (or concurrently with) PG_locked
+ * being cleared, but a memory barrier should be unnecessary since it is
+ * in the same byte as PG_locked.
+ */
+static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
+{
+ clear_bit_unlock(nr, mem);
+ /* smp_mb__after_atomic(); */
+ return test_bit(PG_waiters, mem);
+}
+
+#endif
+
/**
* unlock_page - unlock a locked page
* @page: the page
@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
* mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
*
- * The mb is necessary to enforce ordering between the clear_bit and the read
- * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
+ * Note that this depends on PG_waiters being the sign bit in the byte
+ * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
+ * clear the PG_locked bit and test PG_waiters at the same time fairly
+ * portably (architectures that do LL/SC can test any bit, while x86 can
+ * test the sign bit).
*/
void unlock_page(struct page *page)
{
+ BUILD_BUG_ON(PG_waiters != 7);
page = compound_head(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
- clear_bit_unlock(PG_locked, &page->flags);
- smp_mb__after_atomic();
- wake_up_page(page, PG_locked);
+ if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
+ wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 10eedbf14421..5f3ad65c85de 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
assert_spin_locked(pmd_lockptr(mm, pmd));
+ /*
+ * When we COW a devmap PMD entry, we split it into PTEs, so we should
+ * not be in this function with `flags & FOLL_COW` set.
+ */
+ WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
if (flags & FOLL_WRITE && !pmd_write(*pmd))
return NULL;
@@ -883,15 +889,17 @@ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
pmd_t entry;
unsigned long haddr;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto unlock;
entry = pmd_mkyoung(orig_pmd);
+ if (write)
+ entry = pmd_mkdirty(entry);
haddr = vmf->address & HPAGE_PMD_MASK;
- if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
- vmf->flags & FAULT_FLAG_WRITE))
+ if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
unlock:
@@ -919,8 +927,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
- pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
- __GFP_OTHER_NODE, vma,
+ pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
vmf->address, page_to_nid(page));
if (unlikely(!pages[i] ||
mem_cgroup_try_charge(pages[i], vma->vm_mm,
@@ -1127,6 +1134,16 @@ out_unlock:
return ret;
}
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+ return pmd_write(pmd) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
@@ -1137,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
assert_spin_locked(pmd_lockptr(mm, pmd));
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
+ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
goto out;
/* Avoid dumping huge zero page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3edb759c5c7d..c7025c132670 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1773,23 +1773,32 @@ free:
}
/*
- * When releasing a hugetlb pool reservation, any surplus pages that were
- * allocated to satisfy the reservation must be explicitly freed if they were
- * never used.
- * Called with hugetlb_lock held.
+ * This routine has two main purposes:
+ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
+ * in unused_resv_pages. This corresponds to the prior adjustments made
+ * to the associated reservation map.
+ * 2) Free any unused surplus pages that may have been allocated to satisfy
+ * the reservation. As many as unused_resv_pages may be freed.
+ *
+ * Called with hugetlb_lock held. However, the lock could be dropped (and
+ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
+ * we must make sure nobody else can claim pages we are in the process of
+ * freeing. Do this by ensuring resv_huge_pages is always greater than the
+ * number of huge pages we plan to free when dropping the lock.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
- /* Uncommit the reservation */
- h->resv_huge_pages -= unused_resv_pages;
-
/* Cannot return gigantic pages currently */
if (hstate_is_gigantic(h))
- return;
+ goto out;
+ /*
+ * Part (or even all) of the reservation could have been backed
+ * by pre-allocated pages. Only free surplus pages.
+ */
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
/*
@@ -1799,12 +1808,22 @@ static void return_unused_surplus_pages(struct hstate *h,
* when the nodes with surplus pages have no free pages.
* free_pool_huge_page() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
+ *
+ * Note that we decrement resv_huge_pages as we free the pages. If
+ * we drop the lock, resv_huge_pages will still be sufficiently large
+ * to cover subsequent pages we may free.
*/
while (nr_pages--) {
+ h->resv_huge_pages--;
+ unused_resv_pages--;
if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
- break;
+ goto out;
cond_resched_lock(&hugetlb_lock);
}
+
+out:
+ /* Fully uncommit the reservation */
+ h->resv_huge_pages -= unused_resv_pages;
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e32389a97030..77ae3239c3de 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
/* Only allocate from the target node */
- gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
+ gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
/*
* Before allocating the hugepage, release the mmap_sem read lock.
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
struct vm_area_struct *vma;
unsigned long addr;
pmd_t *pmd, _pmd;
- bool deposited = false;
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
- /*
- * now deposit the pgtable for arch that need it
- * otherwise free it.
- */
- if (arch_needs_pgtable_deposit()) {
- /*
- * The deposit should be visibile only after
- * collapse is seen by others.
- */
- smp_wmb();
- pgtable_trans_huge_deposit(vma->vm_mm, pmd,
- pmd_pgtable(_pmd));
- deposited = true;
- }
spin_unlock(ptl);
up_write(&vma->vm_mm->mmap_sem);
- if (!deposited) {
- atomic_long_dec(&vma->vm_mm->nr_ptes);
- pte_free(vma->vm_mm, pmd_pgtable(_pmd));
- }
+ atomic_long_dec(&vma->vm_mm->nr_ptes);
+ pte_free(vma->vm_mm, pmd_pgtable(_pmd));
}
}
i_mmap_unlock_write(mapping);
@@ -1326,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
/* Only allocate from the target node */
- gfp = alloc_hugepage_khugepaged_gfpmask() |
- __GFP_OTHER_NODE | __GFP_THISNODE;
+ gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
new_page = khugepaged_alloc_page(hpage, gfp, node);
if (!new_page) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4048897e7b01..b822e158b319 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -625,8 +625,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
{
+ struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
unsigned long nr = 0;
- struct mem_cgroup_per_node *mz;
enum lru_list lru;
VM_BUG_ON((unsigned)nid >= nr_node_ids);
@@ -634,8 +634,7 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
for_each_lru(lru) {
if (!(BIT(lru) & lru_mask))
continue;
- mz = mem_cgroup_nodeinfo(memcg, nid);
- nr += mz->lru_size[lru];
+ nr += mem_cgroup_get_lru_size(lruvec, lru);
}
return nr;
}
@@ -1002,6 +1001,7 @@ out:
* mem_cgroup_update_lru_size - account for adding or removing an lru page
* @lruvec: mem_cgroup per zone lru vector
* @lru: index of lru list the page is sitting on
+ * @zid: zone id of the accounted pages
* @nr_pages: positive when adding or negative when removing
*
* This function must be called under lru_lock, just before a page is added
@@ -1009,27 +1009,25 @@ out:
* so as to allow it to check that lru_size 0 is consistent with list_empty).
*/
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int nr_pages)
+ int zid, int nr_pages)
{
struct mem_cgroup_per_node *mz;
unsigned long *lru_size;
long size;
- bool empty;
if (mem_cgroup_disabled())
return;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- lru_size = mz->lru_size + lru;
- empty = list_empty(lruvec->lists + lru);
+ lru_size = &mz->lru_zone_size[zid][lru];
if (nr_pages < 0)
*lru_size += nr_pages;
size = *lru_size;
- if (WARN_ONCE(size < 0 || empty != !size,
- "%s(%p, %d, %d): lru_size %ld but %sempty\n",
- __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
+ if (WARN_ONCE(size < 0,
+ "%s(%p, %d, %d): lru_size %ld\n",
+ __func__, lruvec, lru, nr_pages, size)) {
VM_BUG_ON(1);
*lru_size = 0;
}
@@ -4355,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
return ret;
}
- /* Try charges one by one with reclaim */
+ /* Try charges one by one with reclaim, but do not retry */
while (count--) {
- ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+ ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
if (ret)
return ret;
mc.precharge++;
diff --git a/mm/memory.c b/mm/memory.c
index 7d23b5050248..6bf2b471e30c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3008,13 +3008,6 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
ret = 0;
count_vm_event(THP_FILE_MAPPED);
out:
- /*
- * If we are going to fallback to pte mapping, do a
- * withdraw with pmd lock held.
- */
- if (arch_needs_pgtable_deposit() && ret == VM_FAULT_FALLBACK)
- vmf->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm,
- vmf->pmd);
spin_unlock(vmf->ptl);
return ret;
}
@@ -3055,20 +3048,18 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
ret = do_set_pmd(vmf, page);
if (ret != VM_FAULT_FALLBACK)
- goto fault_handled;
+ return ret;
}
if (!vmf->pte) {
ret = pte_alloc_one_map(vmf);
if (ret)
- goto fault_handled;
+ return ret;
}
/* Re-check under ptl */
- if (unlikely(!pte_none(*vmf->pte))) {
- ret = VM_FAULT_NOPAGE;
- goto fault_handled;
- }
+ if (unlikely(!pte_none(*vmf->pte)))
+ return VM_FAULT_NOPAGE;
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
@@ -3088,15 +3079,8 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, vmf->address, vmf->pte);
- ret = 0;
-fault_handled:
- /* preallocated pagetable is unused: free it */
- if (vmf->prealloc_pte) {
- pte_free(vmf->vma->vm_mm, vmf->prealloc_pte);
- vmf->prealloc_pte = 0;
- }
- return ret;
+ return 0;
}
@@ -3360,15 +3344,24 @@ static int do_shared_fault(struct vm_fault *vmf)
static int do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ int ret;
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
if (!vma->vm_ops->fault)
- return VM_FAULT_SIGBUS;
- if (!(vmf->flags & FAULT_FLAG_WRITE))
- return do_read_fault(vmf);
- if (!(vma->vm_flags & VM_SHARED))
- return do_cow_fault(vmf);
- return do_shared_fault(vmf);
+ ret = VM_FAULT_SIGBUS;
+ else if (!(vmf->flags & FAULT_FLAG_WRITE))
+ ret = do_read_fault(vmf);
+ else if (!(vma->vm_flags & VM_SHARED))
+ ret = do_cow_fault(vmf);
+ else
+ ret = do_shared_fault(vmf);
+
+ /* preallocated pagetable is unused: free it */
+ if (vmf->prealloc_pte) {
+ pte_free(vma->vm_mm, vmf->prealloc_pte);
+ vmf->prealloc_pte = 0;
+ }
+ return ret;
}
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
@@ -3779,8 +3772,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */
-static int __follow_pte(struct mm_struct *mm, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp)
+static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
pgd_t *pgd;
pud_t *pud;
@@ -3797,11 +3790,20 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
pmd = pmd_offset(pud, address);
VM_BUG_ON(pmd_trans_huge(*pmd));
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- goto out;
- /* We cannot handle huge page PFN maps. Luckily they don't exist. */
- if (pmd_huge(*pmd))
+ if (pmd_huge(*pmd)) {
+ if (!pmdpp)
+ goto out;
+
+ *ptlp = pmd_lock(mm, pmd);
+ if (pmd_huge(*pmd)) {
+ *pmdpp = pmd;
+ return 0;
+ }
+ spin_unlock(*ptlp);
+ }
+
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
goto out;
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
@@ -3817,16 +3819,30 @@ out:
return -EINVAL;
}
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
- spinlock_t **ptlp)
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, spinlock_t **ptlp)
+{
+ int res;
+
+ /* (void) is needed to make gcc happy */
+ (void) __cond_lock(*ptlp,
+ !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
+ ptlp)));
+ return res;
+}
+
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
- !(res = __follow_pte(mm, address, ptepp, ptlp)));
+ !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
+ ptlp)));
return res;
}
+EXPORT_SYMBOL(follow_pte_pmd);
/**
* follow_pfn - look up PFN at a user virtual address
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e43142c15631..ca2723d47338 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
node_set_state(node, N_MEMORY);
}
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+ enum zone_type target, int *zone_shift)
{
struct zone *zone = page_zone(pfn_to_page(pfn));
enum zone_type idx = zone_idx(zone);
int i;
+ *zone_shift = 0;
+
if (idx < target) {
/* pages must be at end of current zone */
if (pfn + nr_pages != zone_end_pfn(zone))
- return 0;
+ return false;
/* no zones in use between current zone and target */
for (i = idx + 1; i < target; i++)
if (zone_is_initialized(zone - idx + i))
- return 0;
+ return false;
}
if (target < idx) {
/* pages must be at beginning of current zone */
if (pfn != zone->zone_start_pfn)
- return 0;
+ return false;
/* no zones in use between current zone and target */
for (i = target + 1; i < idx; i++)
if (zone_is_initialized(zone - idx + i))
- return 0;
+ return false;
}
- return target - idx;
+ *zone_shift = target - idx;
+ return true;
}
/* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
!can_online_high_movable(zone))
return -EINVAL;
- if (online_type == MMOP_ONLINE_KERNEL)
- zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
- else if (online_type == MMOP_ONLINE_MOVABLE)
- zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+ if (online_type == MMOP_ONLINE_KERNEL) {
+ if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+ return -EINVAL;
+ } else if (online_type == MMOP_ONLINE_MOVABLE) {
+ if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+ return -EINVAL;
+ }
zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
if (!zone)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2e346645eb80..1e7873e40c9a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ retry_cpuset:
nmask = policy_nodemask(gfp, pol);
zl = policy_zonelist(gfp, pol, node);
- mpol_cond_put(pol);
page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+ mpol_cond_put(pol);
out:
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c6d5f64feca..f3e0c69a97b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1864,14 +1864,14 @@ int move_freepages(struct zone *zone,
#endif
for (page = start_page; page <= end_page;) {
- /* Make sure we are not inadvertently changing nodes */
- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}
+ /* Make sure we are not inadvertently changing nodes */
+ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
if (!PageBuddy(page)) {
page++;
continue;
@@ -2583,30 +2583,22 @@ int __isolate_free_page(struct page *page, unsigned int order)
* Update NUMA hit/miss statistics
*
* Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
*/
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
- gfp_t flags)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
#ifdef CONFIG_NUMA
- int local_nid = numa_node_id();
enum zone_stat_item local_stat = NUMA_LOCAL;
- if (unlikely(flags & __GFP_OTHER_NODE)) {
+ if (z->node != numa_node_id())
local_stat = NUMA_OTHER;
- local_nid = preferred_zone->node;
- }
- if (z->node == local_nid) {
+ if (z->node == preferred_zone->node)
__inc_zone_state(z, NUMA_HIT);
- __inc_zone_state(z, local_stat);
- } else {
+ else {
__inc_zone_state(z, NUMA_MISS);
__inc_zone_state(preferred_zone, NUMA_FOREIGN);
}
+ __inc_zone_state(z, local_stat);
#endif
}
@@ -2674,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
}
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
+ zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
@@ -3531,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct page *page = NULL;
unsigned int alloc_flags;
unsigned long did_some_progress;
- enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+ enum compact_priority compact_priority;
enum compact_result compact_result;
- int compaction_retries = 0;
- int no_progress_loops = 0;
+ int compaction_retries;
+ int no_progress_loops;
unsigned long alloc_start = jiffies;
unsigned int stall_timeout = 10 * HZ;
+ unsigned int cpuset_mems_cookie;
/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -3557,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
gfp_mask &= ~__GFP_ATOMIC;
+retry_cpuset:
+ compaction_retries = 0;
+ no_progress_loops = 0;
+ compact_priority = DEF_COMPACT_PRIORITY;
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ /*
+ * We need to recalculate the starting point for the zonelist iterator
+ * because we might have used different nodemask in the fast path, or
+ * there was a cpuset modification and we are retrying - otherwise we
+ * could end up iterating over non-eligible zones endlessly.
+ */
+ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, ac->nodemask);
+ if (!ac->preferred_zoneref->zone)
+ goto nopage;
+
+
/*
* The fast path uses conservative alloc_flags to succeed only until
* kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3716,6 +3726,13 @@ retry:
&compaction_retries))
goto retry;
+ /*
+ * It's possible we raced with cpuset update so the OOM would be
+ * premature (see below the nopage: label for full explanation).
+ */
+ if (read_mems_allowed_retry(cpuset_mems_cookie))
+ goto retry_cpuset;
+
/* Reclaim has failed us, start killing things */
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
if (page)
@@ -3728,6 +3745,16 @@ retry:
}
nopage:
+ /*
+ * When updating a task's mems_allowed or mempolicy nodemask, it is
+ * possible to race with parallel threads in such a way that our
+ * allocation can fail while the mask is being updated. If we are about
+ * to fail, check if the cpuset changed during allocation and if so,
+ * retry.
+ */
+ if (read_mems_allowed_retry(cpuset_mems_cookie))
+ goto retry_cpuset;
+
warn_alloc(gfp_mask,
"page allocation failure: order:%u", order);
got_pg:
@@ -3742,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
struct page *page;
- unsigned int cpuset_mems_cookie;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = {
@@ -3779,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
-retry_cpuset:
- cpuset_mems_cookie = read_mems_allowed_begin();
-
/* Dirty zone balancing only done in the fast path */
ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
@@ -3792,8 +3815,13 @@ retry_cpuset:
*/
ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
ac.high_zoneidx, ac.nodemask);
- if (!ac.preferred_zoneref) {
+ if (!ac.preferred_zoneref->zone) {
page = NULL;
+ /*
+ * This might be due to race with cpuset_current_mems_allowed
+ * update, so make sure we retry with original nodemask in the
+ * slow path.
+ */
goto no_zone;
}
@@ -3802,6 +3830,7 @@ retry_cpuset:
if (likely(page))
goto out;
+no_zone:
/*
* Runtime PM, block IO and its error handling path can deadlock
* because I/O on the device might not complete.
@@ -3813,21 +3842,10 @@ retry_cpuset:
* Restore the original nodemask if it was potentially replaced with
* &cpuset_current_mems_allowed to optimize the fast-path attempt.
*/
- if (cpusets_enabled())
+ if (unlikely(ac.nodemask != nodemask))
ac.nodemask = nodemask;
- page = __alloc_pages_slowpath(alloc_mask, order, &ac);
-no_zone:
- /*
- * When updating a task's mems_allowed, it is possible to race with
- * parallel threads in such a way that an allocation can fail while
- * the mask is being updated. If a page allocation is about to fail,
- * check if the cpuset changed during allocation and if so, retry.
- */
- if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
- alloc_mask = gfp_mask;
- goto retry_cpuset;
- }
+ page = __alloc_pages_slowpath(alloc_mask, order, &ac);
out:
if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -3904,8 +3922,8 @@ EXPORT_SYMBOL(free_pages);
* drivers to provide a backing region of memory for use as either an
* sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
*/
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
- gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+ gfp_t gfp_mask)
{
struct page *page = NULL;
gfp_t gfp = gfp_mask;
@@ -3925,22 +3943,23 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
return page;
}
-void __page_frag_drain(struct page *page, unsigned int order,
- unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
{
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
if (page_ref_sub_and_test(page, count)) {
+ unsigned int order = compound_order(page);
+
if (order == 0)
free_hot_cold_page(page, false);
else
__free_pages_ok(page, order);
}
}
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
-void *__alloc_page_frag(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
{
unsigned int size = PAGE_SIZE;
struct page *page;
@@ -3948,7 +3967,7 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
if (unlikely(!nc->va)) {
refill:
- page = __page_frag_refill(nc, gfp_mask);
+ page = __page_frag_cache_refill(nc, gfp_mask);
if (!page)
return NULL;
@@ -3991,19 +4010,19 @@ refill:
return nc->va + offset;
}
-EXPORT_SYMBOL(__alloc_page_frag);
+EXPORT_SYMBOL(page_frag_alloc);
/*
* Frees a page fragment allocated out of either a compound or order 0 page.
*/
-void __free_page_frag(void *addr)
+void page_frag_free(void *addr)
{
struct page *page = virt_to_head_page(addr);
if (unlikely(put_page_testzero(page)))
__free_pages_ok(page, compound_order(page));
}
-EXPORT_SYMBOL(__free_page_frag);
+EXPORT_SYMBOL(page_frag_free);
static void *make_alloc_exact(unsigned long addr, unsigned int order,
size_t size)
@@ -7255,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
.zone = page_zone(pfn_to_page(start)),
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
+ .gfp_mask = GFP_KERNEL,
};
INIT_LIST_HEAD(&cc.migratepages);
diff --git a/mm/slab.c b/mm/slab.c
index 29bc6c0dedd0..4f2ec6bb46eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2457,7 +2457,6 @@ union freelist_init_state {
unsigned int pos;
unsigned int *list;
unsigned int count;
- unsigned int rand;
};
struct rnd_state rnd_state;
};
@@ -2483,8 +2482,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
} else {
state->list = cachep->random_seq;
state->count = count;
- state->pos = 0;
- state->rand = rand;
+ state->pos = rand % count;
ret = true;
}
return ret;
@@ -2493,7 +2491,9 @@ static bool freelist_state_initialize(union freelist_init_state *state,
/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
- return (state->list[state->pos++] + state->rand) % state->count;
+ if (state->pos >= state->count)
+ state->pos = 0;
+ return state->list[state->pos++];
}
/* Swap two freelist entries */
diff --git a/mm/slub.c b/mm/slub.c
index 067598a00849..7aa6f433f4de 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
}
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+ unsigned int length)
{
metadata_access_enable();
- print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
length, 1);
metadata_access_disable();
}
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ s->red_left_pad);
else if (p > addr + 16)
- print_section("Bytes b4 ", p - 16, 16);
+ print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
- print_section("Object ", p, min_t(unsigned long, s->object_size,
- PAGE_SIZE));
+ print_section(KERN_ERR, "Object ", p,
+ min_t(unsigned long, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p + s->object_size,
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section("Padding ", p + off, size_from_object(s) - off);
+ print_section(KERN_ERR, "Padding ", p + off,
+ size_from_object(s) - off);
dump_stack();
}
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
end--;
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
- print_section("Padding ", end - remainder, remainder);
+ print_section(KERN_ERR, "Padding ", end - remainder, remainder);
restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
page->freelist);
if (!alloc)
- print_section("Object ", (void *)object,
+ print_section(KERN_INFO, "Object ", (void *)object,
s->object_size);
dump_stack();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1c6e0321205d..4761701d1721 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -943,11 +943,25 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
count = page_trans_huge_mapcount(page, total_mapcount);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
- if (count == 1 && !PageWriteback(page)) {
+ if (count != 1)
+ goto out;
+ if (!PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
+ } else {
+ swp_entry_t entry;
+ struct swap_info_struct *p;
+
+ entry.val = page_private(page);
+ p = swap_info_get(entry);
+ if (p->flags & SWP_STABLE_WRITES) {
+ spin_unlock(&p->lock);
+ return false;
+ }
+ spin_unlock(&p->lock);
}
}
+out:
return count <= 1;
}
@@ -2448,6 +2462,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -ENOMEM;
goto bad_swap;
}
+
+ if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+ p->flags |= SWP_STABLE_WRITES;
+
if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
int cpu;
diff --git a/mm/truncate.c b/mm/truncate.c
index fd97f1dbce29..dd7b24e083c5 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -24,20 +24,12 @@
#include <linux/rmap.h>
#include "internal.h"
-static void clear_exceptional_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+ void *entry)
{
struct radix_tree_node *node;
void **slot;
- /* Handled by shmem itself */
- if (shmem_mapping(mapping))
- return;
-
- if (dax_mapping(mapping)) {
- dax_delete_mapping_entry(mapping, index);
- return;
- }
spin_lock_irq(&mapping->tree_lock);
/*
* Regular page slots are stabilized by the page lock even
@@ -55,6 +47,56 @@ unlock:
spin_unlock_irq(&mapping->tree_lock);
}
+/*
+ * Unconditionally remove exceptional entry. Usually called from truncate path.
+ */
+static void truncate_exceptional_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ /* Handled by shmem itself */
+ if (shmem_mapping(mapping))
+ return;
+
+ if (dax_mapping(mapping)) {
+ dax_delete_mapping_entry(mapping, index);
+ return;
+ }
+ clear_shadow_entry(mapping, index, entry);
+}
+
+/*
+ * Invalidate exceptional entry if easily possible. This handles exceptional
+ * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
+ * clean entries.
+ */
+static int invalidate_exceptional_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ /* Handled by shmem itself */
+ if (shmem_mapping(mapping))
+ return 1;
+ if (dax_mapping(mapping))
+ return dax_invalidate_mapping_entry(mapping, index);
+ clear_shadow_entry(mapping, index, entry);
+ return 1;
+}
+
+/*
+ * Invalidate exceptional entry if clean. This handles exceptional entries for
+ * invalidate_inode_pages2() so for DAX it evicts only clean entries.
+ */
+static int invalidate_exceptional_entry2(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ /* Handled by shmem itself */
+ if (shmem_mapping(mapping))
+ return 1;
+ if (dax_mapping(mapping))
+ return dax_invalidate_mapping_entry_sync(mapping, index);
+ clear_shadow_entry(mapping, index, entry);
+ return 1;
+}
+
/**
* do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected
@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
break;
if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
+ truncate_exceptional_entry(mapping, index,
+ page);
continue;
}
@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
}
if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
+ truncate_exceptional_entry(mapping, index,
+ page);
continue;
}
@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
break;
if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
+ invalidate_exceptional_entry(mapping, index,
+ page);
continue;
}
@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
break;
if (radix_tree_exceptional_entry(page)) {
- clear_exceptional_entry(mapping, index, page);
+ if (!invalidate_exceptional_entry2(mapping,
+ index, page))
+ ret = -EBUSY;
continue;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6aa5b01d3e75..532a2a750952 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -242,6 +242,16 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
}
+unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int zone_idx)
+{
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
+
+ return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
+ NR_ZONE_LRU_BASE + lru);
+}
+
/*
* Add a shrinker callback to be called from the vm.
*/
@@ -1382,8 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
*/
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
- enum lru_list lru, unsigned long *nr_zone_taken,
- unsigned long nr_taken)
+ enum lru_list lru, unsigned long *nr_zone_taken)
{
int zid;
@@ -1392,11 +1401,11 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
continue;
__update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
- }
-
#ifdef CONFIG_MEMCG
- mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
+ mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#endif
+ }
+
}
/*
@@ -1501,7 +1510,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
*nr_scanned = scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
nr_taken, mode, is_file_lru(lru));
- update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
+ update_lru_sizes(lruvec, lru, nr_zone_taken);
return nr_taken;
}
@@ -2047,10 +2056,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
if (!managed_zone(zone))
continue;
- inactive_zone = zone_page_state(zone,
- NR_ZONE_LRU_BASE + (file * LRU_FILE));
- active_zone = zone_page_state(zone,
- NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
+ inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
+ active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
inactive -= min(inactive, inactive_zone);
active -= min(active, active_zone);
diff --git a/mm/workingset.c b/mm/workingset.c
index 241fa5d6b3b2..abb58ffa3c64 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -473,7 +473,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
if (WARN_ON_ONCE(node->exceptional))
goto out_invalid;
inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
- __radix_tree_delete_node(&mapping->page_tree, node);
+ __radix_tree_delete_node(&mapping->page_tree, node,
+ workingset_update_node, mapping);
out_invalid:
spin_unlock(&mapping->tree_lock);
diff --git a/net/Kconfig b/net/Kconfig
index a1005007224c..a29bb4b41c50 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -258,10 +258,6 @@ config XPS
config HWBM
bool
-config SOCK_CGROUP_DATA
- bool
- default n
-
config CGROUP_NET_PRIO
bool "Network priority cgroup"
depends on CGROUPS
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 019557d0a11d..09cfe87f0a44 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1059,7 +1059,9 @@ static void __exit lane_module_cleanup(void)
{
int i;
+#ifdef CONFIG_PROC_FS
remove_proc_entry("lec", atm_proc_root);
+#endif
deregister_atm_ioctl(&lane_ioctl_ops);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 4855d18a8511..038b109b2be7 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
- if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 9c561e683f4b..0854ebd8613e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = -EINVAL;
- goto put_primary_if;
+ goto free_skb;
}
/* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
if (!skb_fragment) {
ret = -ENOMEM;
- goto free_skb;
+ goto put_primary_if;
}
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
if (ret != NET_XMIT_SUCCESS) {
ret = NET_XMIT_DROP;
- goto free_skb;
+ goto put_primary_if;
}
frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
/* The initial check in this function should cover this case */
if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
ret = -EINVAL;
- goto free_skb;
+ goto put_primary_if;
}
}
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
if (batadv_skb_head_push(skb, header_size) < 0 ||
pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
ret = -ENOMEM;
- goto free_skb;
+ goto put_primary_if;
}
memcpy(skb->data, &frag_header, header_size);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 8ca6a929bf12..95087e6e8258 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -399,7 +399,7 @@ bridged_dnat:
br_nf_hook_thresh(NF_BR_PRE_ROUTING,
net, sk, skb, skb->dev,
NULL,
- br_nf_pre_routing_finish);
+ br_nf_pre_routing_finish_bridge);
return 0;
}
ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 71c7453268c1..7109b389ea58 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
-{
- struct net_bridge *br = netdev_priv(dev);
-
- if (tb[IFLA_ADDRESS]) {
- spin_lock_bh(&br->lock);
- br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
- spin_unlock_bh(&br->lock);
- }
-
- return register_netdevice(dev);
-}
-
static int br_port_slave_changelink(struct net_device *brdev,
struct net_device *dev,
struct nlattr *tb[],
@@ -1115,6 +1101,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
return 0;
}
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_bridge *br = netdev_priv(dev);
+ int err;
+
+ if (tb[IFLA_ADDRESS]) {
+ spin_lock_bh(&br->lock);
+ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+ spin_unlock_bh(&br->lock);
+ }
+
+ err = br_changelink(dev, tb, data);
+ if (err)
+ return err;
+
+ return register_netdevice(dev);
+}
+
static size_t br_get_size(const struct net_device *brdev)
{
return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 3949ce70be07..292e33bd916e 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
- char iv[AES_BLOCK_SIZE];
+ char iv[AES_BLOCK_SIZE] __aligned(8);
int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
int crypt_len = encrypt ? in_len + pad_byte : in_len;
int ret;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8db5a0b4b520..7f218e095361 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2795,9 +2795,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, type)) {
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- } else if (illegal_highdma(skb->dev, skb)) {
- features &= ~NETIF_F_SG;
}
+ if (illegal_highdma(skb->dev, skb))
+ features &= ~NETIF_F_SG;
return features;
}
@@ -4441,7 +4441,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
pinfo->nr_frags &&
!PageHighMem(skb_frag_page(frag0))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ skb_frag_size(frag0),
+ skb->end - skb->tail);
}
}
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 8e0c0635ee97..fb55327dcfea 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -75,6 +75,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
struct nlattr *nla;
struct sk_buff *skb;
unsigned long flags;
+ void *msg_header;
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -82,21 +83,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
skb = genlmsg_new(al, GFP_KERNEL);
- if (skb) {
- genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(skb, NLA_UNSPEC,
- sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- } else {
- mod_timer(&data->send_timer, jiffies + HZ / 10);
+ if (!skb)
+ goto err;
+
+ msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ skb = NULL;
+ goto err;
+ }
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ if (!nla) {
+ nlmsg_free(skb);
+ skb = NULL;
+ goto err;
}
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ goto out;
+err:
+ mod_timer(&data->send_timer, jiffies + HZ / 10);
+out:
spin_lock_irqsave(&data->lock, flags);
swap(data->skb, skb);
spin_unlock_irqrestore(&data->lock, flags);
+ if (skb) {
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+ struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+ genlmsg_end(skb, genlmsg_data(gnlh));
+ }
+
return skb;
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e23766c7e3ba..236a21e3c878 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
void __user *useraddr)
{
- struct ethtool_channels channels, max;
+ struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
u32 max_rx_in_use = 0;
if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
diff --git a/net/core/filter.c b/net/core/filter.c
index e6c412b94dec..1969b3f118c1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-void bpf_warn_invalid_xdp_buffer(void)
-{
- WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n");
-}
-EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer);
-
static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn_buf,
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6447dc10371..1b7673aac59d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -67,8 +67,8 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
* The function will try to retrieve a be32 entity at
* offset poff
*/
-__be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, void *data,
- int hlen)
+static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
+ void *data, int hlen)
{
__be16 *u, _u;
@@ -468,8 +468,9 @@ ip_proto_again:
if (hdr->flags & GRE_ACK)
offset += sizeof(((struct pptp_gre_header *)0)->ack);
- ppp_hdr = skb_header_pointer(skb, nhoff + offset,
- sizeof(_ppp_hdr), _ppp_hdr);
+ ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
+ sizeof(_ppp_hdr),
+ data, hlen, _ppp_hdr);
if (!ppp_hdr)
goto out_bad;
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 71bb3e2eca08..b3eef90b2df9 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
.fill_encap = bpf_fill_encap_info,
.get_encap_size = bpf_encap_nlsize,
.cmp_encap = bpf_encap_cmp,
+ .owner = THIS_MODULE,
};
static int __init bpf_lwt_init(void)
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index a5d4e866ce88..c23465005f2f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,6 +26,7 @@
#include <net/lwtunnel.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
+#include <net/nexthop.h>
#ifdef CONFIG_MODULES
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
+ if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+ ret = ops->build_state(dev, encap, family, cfg, lws);
+ if (ret)
+ module_put(ops->owner);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE ||
+ encap_type > LWTUNNEL_ENCAP_MAX)
+ return ret;
+
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
+ rcu_read_unlock();
#ifdef CONFIG_MODULES
if (!ops) {
const char *encap_type_str = lwtunnel_encap_str(encap_type);
if (encap_type_str) {
- rcu_read_unlock();
+ __rtnl_unlock();
request_module("rtnl-lwt-%s", encap_type_str);
+ rtnl_lock();
+
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
+ rcu_read_unlock();
}
}
#endif
- if (likely(ops && ops->build_state))
- ret = ops->build_state(dev, encap, family, cfg, lws);
- rcu_read_unlock();
+ return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
- return ret;
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+ struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+ struct nlattr *nla_entype;
+ struct nlattr *attrs;
+ struct nlattr *nla;
+ u16 encap_type;
+ int attrlen;
+
+ while (rtnh_ok(rtnh, remaining)) {
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ attrs = rtnh_attrs(rtnh);
+ nla = nla_find(attrs, attrlen, RTA_ENCAP);
+ nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+ if (nla_entype) {
+ encap_type = nla_get_u16(nla_entype);
+
+ if (lwtunnel_valid_encap_type(encap_type) != 0)
+ return -EOPNOTSUPP;
+ }
+ }
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(lwtunnel_build_state);
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
void lwtstate_free(struct lwtunnel_state *lws)
{
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
} else {
kfree(lws);
}
+ module_put(ops->owner);
}
EXPORT_SYMBOL(lwtstate_free);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 18b5aae99bec..75e3ea7bda08 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3898,6 +3898,9 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
u32 filter_mask;
int err;
+ if (nlmsg_len(nlh) < sizeof(*ifsm))
+ return -EINVAL;
+
ifsm = nlmsg_data(nlh);
if (ifsm->ifindex > 0)
dev = __dev_get_by_index(net, ifsm->ifindex);
@@ -3947,6 +3950,9 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = net->dev_base_seq;
+ if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
+ return -EINVAL;
+
ifsm = nlmsg_data(cb->nlh);
filter_mask = ifsm->filter_mask;
if (!filter_mask)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5a03730fbc1a..734c71468b01 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, fragsz, gfp_mask);
+ data = page_frag_alloc(nc, fragsz, gfp_mask);
local_irq_restore(flags);
return data;
}
@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+ return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}
void *napi_alloc_frag(unsigned int fragsz)
@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, len, gfp_mask);
+ data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
local_irq_restore(flags);
@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
- data = __alloc_page_frag(&nc->page, len, gfp_mask);
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
if (unlikely(!data))
return NULL;
diff --git a/net/core/sock.c b/net/core/sock.c
index f560e0826009..4eca27dc5c94 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
"sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
- "sk_lock-AF_MAX"
+ "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
"slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
- "slock-AF_MAX"
+ "slock-AF_QIPCRTR", "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
"clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
- "clock-AF_MAX"
+ "clock-AF_QIPCRTR", "clock-AF_MAX"
};
/*
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index adfc790f7193..c4e879c02186 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+ ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
return;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 5fff951a0a49..da3862124545 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -394,9 +394,11 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
return err;
}
- err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
- if (err)
- return err;
+ if (dst->ds[0]) {
+ err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+ if (err)
+ return err;
+ }
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
@@ -433,7 +435,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
dsa_ds_unapply(dst, ds);
}
- dsa_cpu_port_ethtool_restore(dst->ds[0]);
+ if (dst->ds[0])
+ dsa_cpu_port_ethtool_restore(dst->ds[0]);
pr_info("DSA: tree %d unapplied\n", dst->tree);
dst->applied = false;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 68c9eea00518..7d4596110851 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1105,10 +1105,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
/* Use already configured phy mode */
if (p->phy_interface == PHY_INTERFACE_MODE_NA)
p->phy_interface = p->phy->interface;
- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
- p->phy_interface);
-
- return 0;
+ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ p->phy_interface);
}
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1203,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ netif_device_detach(slave_dev);
+
if (p->phy) {
phy_stop(p->phy);
p->old_pause = -1;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3ff8938893ec..7db2ad2e82d3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -46,6 +46,7 @@
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <net/l3mdev.h>
+#include <net/lwtunnel.h>
#include <trace/events/fib.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -85,7 +86,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
if (tb)
return tb;
- if (id == RT_TABLE_LOCAL)
+ if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
alias = fib_new_table(net, RT_TABLE_MAIN);
tb = fib_trie_table(id, alias);
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
cfg->fc_mx_len = nla_len(attr);
break;
case RTA_MULTIPATH:
+ err = lwtunnel_valid_encap_type_attr(nla_data(attr),
+ nla_len(attr));
+ if (err < 0)
+ goto errout;
cfg->fc_mp = nla_data(attr);
cfg->fc_mp_len = nla_len(attr);
break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
break;
case RTA_ENCAP_TYPE:
cfg->fc_encap_type = nla_get_u16(attr);
+ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+ if (err < 0)
+ goto errout;
break;
}
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 7a5b4c7d9a87..9a375b908d01 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1279,8 +1279,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
goto nla_put_failure;
#endif
- if (fi->fib_nh->nh_lwtstate)
- lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
+ if (fi->fib_nh->nh_lwtstate &&
+ lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
+ goto nla_put_failure;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fi->fib_nhs > 1) {
@@ -1316,8 +1317,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
#endif
- if (nh->nh_lwtstate)
- lwtunnel_fill_encap(skb, nh->nh_lwtstate);
+ if (nh->nh_lwtstate &&
+ lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
+ goto nla_put_failure;
+
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
} endfor_nexthops(fi);
@@ -1618,8 +1621,13 @@ void fib_select_multipath(struct fib_result *res, int hash)
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, int mp_hash)
{
+ bool oif_check;
+
+ oif_check = (fl4->flowi4_oif == 0 ||
+ fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+ if (res->fi->fib_nhs > 1 && oif_check) {
if (mp_hash < 0)
mp_hash = get_hash_from_flowi4(fl4) >> 1;
@@ -1629,7 +1637,7 @@ void fib_select_path(struct net *net, struct fib_result *res,
#endif
if (!res->prefixlen &&
res->table->tb_num_default > 1 &&
- res->type == RTN_UNICAST && !fl4->flowi4_oif)
+ res->type == RTN_UNICAST && oif_check)
fib_select_default(fl4, res);
if (!fl4->saddr)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 68d622133f53..5b15459955f8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -219,9 +219,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
static void igmp_gq_start_timer(struct in_device *in_dev)
{
int tv = prandom_u32() % in_dev->mr_maxdelay;
+ unsigned long exp = jiffies + tv + 2;
+
+ if (in_dev->mr_gq_running &&
+ time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+ return;
in_dev->mr_gq_running = 1;
- if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+ if (!mod_timer(&in_dev->mr_gq_timer, exp))
in_dev_hold(in_dev);
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fac275c48108..b67719f45953 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 57e1405e8282..53ae0c6315ad 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1225,8 +1225,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
* which has interface index (iif) as the first member of the
* underlying inet{6}_skb_parm struct. This code then overlays
* PKTINFO_SKB_CB and in_pktinfo also has iif as the first
- * element so the iif is picked up from the prior IPCB
+ * element so the iif is picked up from the prior IPCB. If iif
+ * is the loopback interface, then return the sending interface
+ * (e.g., process binds socket to eth0 for Tx which is
+ * redirected to loopback in the rtable/dst).
*/
+ if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
+ pktinfo->ipi_ifindex = inet_iif(skb);
+
pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
} else {
pktinfo->ipi_ifindex = 0;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index fed3d29f9eb3..0fd1976ab63b 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
.fill_encap = ip_tun_fill_encap_info,
.get_encap_size = ip_tun_encap_nlsize,
.cmp_encap = ip_tun_cmp_encap,
+ .owner = THIS_MODULE,
};
static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
.fill_encap = ip6_tun_fill_encap_info,
.get_encap_size = ip6_tun_encap_nlsize,
.cmp_encap = ip_tun_cmp_encap,
+ .owner = THIS_MODULE,
};
void __init ip_tunnel_core_init(void)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 21db00d0362b..0a783cd73faf 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,6 +144,11 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
rcu_read_lock_bh();
c = __clusterip_config_find(net, clusterip);
if (c) {
+#ifdef CONFIG_PROC_FS
+ if (!c->pde)
+ c = NULL;
+ else
+#endif
if (unlikely(!atomic_inc_not_zero(&c->refcount)))
c = NULL;
else if (entry)
@@ -166,14 +171,15 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
static struct clusterip_config *
clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
- struct net_device *dev)
+ struct net_device *dev)
{
+ struct net *net = dev_net(dev);
struct clusterip_config *c;
- struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+ struct clusterip_net *cn = net_generic(net, clusterip_net_id);
c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c)
- return NULL;
+ return ERR_PTR(-ENOMEM);
c->dev = dev;
c->clusterip = ip;
@@ -185,6 +191,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
atomic_set(&c->refcount, 1);
atomic_set(&c->entries, 1);
+ spin_lock_bh(&cn->lock);
+ if (__clusterip_config_find(net, ip)) {
+ spin_unlock_bh(&cn->lock);
+ kfree(c);
+
+ return ERR_PTR(-EBUSY);
+ }
+
+ list_add_rcu(&c->list, &cn->configs);
+ spin_unlock_bh(&cn->lock);
+
#ifdef CONFIG_PROC_FS
{
char buffer[16];
@@ -195,16 +212,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
cn->procdir,
&clusterip_proc_fops, c);
if (!c->pde) {
+ spin_lock_bh(&cn->lock);
+ list_del_rcu(&c->list);
+ spin_unlock_bh(&cn->lock);
kfree(c);
- return NULL;
+
+ return ERR_PTR(-ENOMEM);
}
}
#endif
- spin_lock_bh(&cn->lock);
- list_add_rcu(&c->list, &cn->configs);
- spin_unlock_bh(&cn->lock);
-
return c;
}
@@ -410,9 +427,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
config = clusterip_config_init(cipinfo,
e->ip.dst.s_addr, dev);
- if (!config) {
+ if (IS_ERR(config)) {
dev_put(dev);
- return -ENOMEM;
+ return PTR_ERR(config);
}
dev_mc_add(config->dev, config->clustermac);
}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index f273098e48fd..37fb9552e858 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
return dev_match || flags & XT_RPFILTER_LOOSE;
}
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
- const struct rtable *rt = skb_rtable(skb);
- return rt && (rt->rt_flags & RTCF_LOCAL);
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
info = par->matchinfo;
invert = info->flags & XT_RPFILTER_INVERT;
- if (rpfilter_is_local(skb))
+ if (rpfilter_is_loopback(skb, xt_in(par)))
return true ^ invert;
iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index fd8220213afc..146d86105183 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));
+ nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
ip4_dst_hoplimit(skb_dst(nskb)));
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 965b1a161369..2981291910dd 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
return addr;
}
-static bool fib4_is_local(const struct sk_buff *skb)
-{
- const struct rtable *rt = skb_rtable(skb);
-
- return rt && (rt->rt_flags & RTCF_LOCAL);
-}
-
#define DSCP_BITS 0xfc
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
else
oif = NULL;
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) {
- nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+ nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+ nft_fib_store_result(dest, priv->result, pkt,
+ nft_in(pkt)->ifindex);
return;
}
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
switch (res.type) {
case RTN_UNICAST:
break;
- case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */
+ case RTN_LOCAL: /* Should not see RTN_LOCAL here */
return;
default:
break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a82a11747b3f..709ffe67d1de 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1914,7 +1914,8 @@ local_input:
}
}
- rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
+ rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+ flags | RTCF_LOCAL, res.type,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;
@@ -2471,7 +2472,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
r->rtm_tos = fl4->flowi4_tos;
- r->rtm_table = table_id;
+ r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table_id))
goto nla_put_failure;
r->rtm_type = rt->rt_type;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 80bc36b25de2..b2fa498b15d1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -433,13 +433,6 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &tcp_adv_win_scale_max,
},
{
- .procname = "tcp_tw_reuse",
- .data = &sysctl_tcp_tw_reuse,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "tcp_frto",
.data = &sysctl_tcp_frto,
.maxlen = sizeof(int),
@@ -958,7 +951,14 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "tcp_tw_reuse",
+ .data = &init_net.ipv4.sysctl_tcp_tw_reuse,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
},
#ifdef CONFIG_IP_ROUTE_MULTIPATH
{
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4e777a3243f9..dd2560c83a85 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
struct tcp_fastopen_cookie tmp;
if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
- struct in6_addr *buf = (struct in6_addr *) tmp.val;
+ struct in6_addr *buf = &tmp.addr;
int i;
for (i = 0; i < 4; i++)
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
* scaled. So correct it appropriately.
*/
tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+ tp->max_window = tp->snd_wnd;
/* Activate the retrans timer so that SYNACK can be retransmitted.
* The request socket is not added to the ehash
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6c790754ae3e..41dcbd568cbe 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5078,7 +5078,7 @@ static void tcp_check_space(struct sock *sk)
if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
/* pairs with tcp_poll() */
- smp_mb__after_atomic();
+ smp_mb();
if (sk->sk_socket &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
tcp_new_space(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 30d81f533ada..fe9da4fb96bf 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -84,7 +84,6 @@
#include <crypto/hash.h>
#include <linux/scatterlist.h>
-int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
#ifdef CONFIG_TCP_MD5SIG
@@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
and use initial timestamp retrieved from peer table.
*/
if (tcptw->tw_ts_recent_stamp &&
- (!twp || (sysctl_tcp_tw_reuse &&
+ (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
if (tp->write_seq == 0)
@@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_orphan_retries = 0;
net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
+ net->ipv4.sysctl_tcp_tw_reuse = 0;
return 0;
fail:
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index d46f4d5b1c62..ba8f02d0f283 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -606,7 +606,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
return ret;
}
-EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c1e124bc8e1e..f60e88e56255 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5540,8 +5540,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5550,7 +5549,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
dev_disable_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index a7bc54ab46e2..13b5e85fe0d5 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
.fill_encap = ila_fill_encap_info,
.get_encap_size = ila_encap_nlsize,
.cmp_encap = ila_encap_cmp,
+ .owner = THIS_MODULE,
};
int ila_lwt_init(void)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 7396e75e161b..75c308239243 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;
- res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
np->tclass);
rcu_read_unlock();
return res;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 75b6108234dd..558631860d91 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -582,6 +582,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
return -1;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
+
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 89c59e656f44..fc7b4017ba24 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -191,6 +191,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive) {
__pskb_pull(skb, skb_gro_offset(skb));
+ skb_gro_frag0_invalidate(skb);
proto = ipv6_gso_pull_exthdrs(skb, proto);
skb_gro_pull(skb, -skb_transport_offset(skb));
skb_reset_transport_header(skb);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 70d0de404197..2c0df09e9036 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
* which are using proper atomic operations or spinlocks.
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass)
+ __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = mark;
mtu = dst_mtu(dst);
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
@@ -1373,7 +1373,7 @@ emsgsize:
*/
cork->length += length;
- if (((length > mtu) ||
+ if ((((length + fragheaderlen) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 36d292180942..ff8ee06491c3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
- __u8 nexthdr = ipv6h->nexthdr;
- __u16 off = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+ u8 next, nexthdr = ipv6h->nexthdr;
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
- __u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
- if (raw + off + sizeof(*hdr) > skb->data &&
- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+ u16 optlen;
+
+ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
break;
- hdr = (struct ipv6_opt_hdr *) (raw + off);
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
} else {
optlen = ipv6_optlen(hdr);
}
+ /* cache hdr->nexthdr, since pskb_may_pull() might
+ * invalidate hdr
+ */
+ next = hdr->nexthdr;
if (nexthdr == NEXTHDR_DEST) {
- __u16 i = off + 2;
+ u16 i = 2;
+
+ /* Remember : hdr is no longer valid at this point. */
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
/* No more room for encapsulation limit */
- if (i + sizeof (*tel) > off + optlen)
+ if (i + sizeof(*tel) > optlen)
break;
- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+ tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
/* return index of option if found and valid */
if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
tel->length == 1)
- return i;
+ return i + off - nhoff;
/* else jump to next option */
if (tel->type)
i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
i++;
}
}
- nexthdr = hdr->nexthdr;
+ nexthdr = next;
off += optlen;
}
return 0;
@@ -1108,7 +1118,7 @@ route_lookup:
t->parms.name);
goto tx_err_dst_release;
}
- mtu = dst_mtu(dst) - psh_hlen;
+ mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
@@ -1117,7 +1127,7 @@ route_lookup:
mtu = IPV6_MIN_MTU;
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len > mtu && !skb_is_gso(skb)) {
+ if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
goto tx_err_dst_release;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
fl6.flowlabel = key->label;
} else {
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
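
The ip6_gre.c and ip6_tunnel.c hunks above guard against the same hazard: pskb_may_pull() may reallocate skb->head, so pointers derived from the old head (ipv6h, hdr, raw) must be recomputed afterwards, and hdr->nexthdr is cached in next before more data is pulled. The parser is therefore rewritten around offsets from skb->data, which survive a reallocation. The same discipline applies to plain realloc(); the following compilable model is only an analogy, not kernel code:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* Grows the buffer if needed; like pskb_may_pull(), this may move the
 * underlying storage, invalidating every pointer into it.
 */
static int buf_may_pull(struct buf *b, size_t need)
{
	if (need <= b->len)
		return 1;
	unsigned char *p = realloc(b->data, need);
	if (!p)
		return 0;
	memset(p + b->len, 0, need - b->len);
	b->data = p;
	b->len = need;
	return 1;
}

int main(void)
{
	struct buf b = { .data = malloc(4), .len = 4 };
	if (!b.data)
		return 1;
	memset(b.data, 0x2b, 4);

	unsigned char *hdr = b.data;        /* pointer into old storage */
	unsigned char cached = hdr[0];      /* cache what we need first */

	if (!buf_may_pull(&b, 4096))        /* may realloc b.data */
		return 1;

	/* Wrong after the call: hdr may now dangle.  Right: recompute
	 * from b.data (offsets survive a realloc), or use the cached copy.
	 */
	hdr = b.data;
	printf("recomputed %#x, cached %#x\n", hdr[0], cached);

	free(b.data);
	return 0;
}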
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index f4b4a4a5f4ba..d82042c8d8fd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
int err;
+ dev->rtnl_link_ops = &vti6_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
strcpy(t->parms.name, dev->name);
- dev->rtnl_link_ops = &vti6_link_ops;
dev_hold(dev);
vti6_tnl_link(ip6n, t);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 14a3903f1c82..7139fffd61b6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
dev_mc_del(dev, buf);
}
- if (mc->mca_flags & MAF_NOREPORT)
- goto done;
spin_unlock_bh(&mc->mca_lock);
+ if (mc->mca_flags & MAF_NOREPORT)
+ return;
if (!mc->idev->dead)
igmp6_leave_group(mc);
@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
spin_lock_bh(&mc->mca_lock);
if (del_timer(&mc->mca_timer))
atomic_dec(&mc->mca_refcnt);
-done:
- ip6_mc_clear_src(mc);
spin_unlock_bh(&mc->mca_lock);
}
@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
spin_unlock_bh(&idev->mc_lock);
}
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc, *pmc_prev;
- struct ip6_sf_list *psf, *psf_next;
+ struct ip6_sf_list *psf;
+ struct in6_addr *pmca = &im->mca_addr;
spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
@@ -768,14 +767,20 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
}
spin_unlock_bh(&idev->mc_lock);
+ spin_lock_bh(&im->mca_lock);
if (pmc) {
- for (psf = pmc->mca_tomb; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+ im->mca_sfmode = pmc->mca_sfmode;
+ if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ im->mca_tomb = pmc->mca_tomb;
+ im->mca_sources = pmc->mca_sources;
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
- kfree(pmc);
}
+ spin_unlock_bh(&im->mca_lock);
}
static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +909,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
mca_get(mc);
write_unlock_bh(&idev->lock);
- mld_del_delrec(idev, &mc->mca_addr);
+ mld_del_delrec(idev, mc);
igmp6_group_added(mc);
ma_put(mc);
return 0;
@@ -927,6 +932,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_unlock_bh(&idev->lock);
igmp6_group_dropped(ma);
+ ip6_mc_clear_src(ma);
ma_put(ma);
return 0;
@@ -2501,15 +2507,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
- mld_ifc_stop_timer(idev);
- mld_gq_stop_timer(idev);
- mld_dad_stop_timer(idev);
for (i = idev->mc_list; i; i = i->next)
igmp6_group_dropped(i);
- read_unlock_bh(&idev->lock);
- mld_clear_delrec(idev);
+ /* Should stop timer after group drop. or we will
+ * start timer again in mld_ifc_event()
+ */
+ mld_ifc_stop_timer(idev);
+ mld_gq_stop_timer(idev);
+ mld_dad_stop_timer(idev);
+ read_unlock_bh(&idev->lock);
}
static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2539,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
read_lock_bh(&idev->lock);
ipv6_mc_reset(idev);
- for (i = idev->mc_list; i; i = i->next)
+ for (i = idev->mc_list; i; i = i->next) {
+ mld_del_delrec(idev, i);
igmp6_group_added(i);
+ }
read_unlock_bh(&idev->lock);
}
@@ -2565,6 +2575,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
/* Deactivate timers */
ipv6_mc_down(idev);
+ mld_clear_delrec(idev);
/* Delete all-nodes address. */
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2590,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->next;
- write_unlock_bh(&idev->lock);
- igmp6_group_dropped(i);
+ write_unlock_bh(&idev->lock);
ma_put(i);
-
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index d5263dc364a9..b12e61b7b16c 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
return ret;
}
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
- const struct rt6_info *rt = (const void *) skb_dst(skb);
- return rt && (rt->rt6i_flags & RTF_LOCAL);
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
struct ipv6hdr *iph;
bool invert = info->flags & XT_RPFILTER_INVERT;
- if (rpfilter_is_local(skb))
+ if (rpfilter_is_loopback(skb, xt_in(par)))
return true ^ invert;
iph = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 10090400c72f..eedee5d108d9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+ fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
skb_dst_set(nskb, dst);
+ nskb->mark = fl6.flowi6_mark;
+
skb_reserve(nskb, hh_len + dst->header_len);
ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
ip6_dst_hoplimit(dst));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index c947aad8bcc6..765facf03d45 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -18,13 +18,6 @@
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
-static bool fib6_is_local(const struct sk_buff *skb)
-{
- const struct rt6_info *rt = (const void *)skb_dst(skb);
-
- return rt && (rt->rt6i_flags & RTF_LOCAL);
-}
-
static int get_ifindex(const struct net_device *dev)
{
return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) {
- nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+ nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+ nft_fib_store_result(dest, priv->result, pkt,
+ nft_in(pkt)->ifindex);
return;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8417c41d8ec8..7ea85370c11c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1464,7 +1464,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
struct fib6_node *fn;
/* Get the "current" route for this destination and
- * check if the redirect has come from approriate router.
+ * check if the redirect has come from appropriate router.
*
* RFC 4861 specifies that redirects should only be
* accepted if they come from the nexthop to the target.
@@ -2768,7 +2768,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
old MTU is the lowest MTU in the path, update the route PMTU
to reflect the increase. In this case if the other nodes' MTU
also have the lowest MTU, TOO BIG MESSAGE will be lead to
- PMTU discouvery.
+ PMTU discovery.
*/
if (rt->dst.dev == arg->dev &&
dst_metric_raw(&rt->dst, RTAX_MTU) &&
@@ -2896,6 +2896,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RTA_MULTIPATH]) {
cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
+
+ err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
+ cfg->fc_mp_len);
+ if (err < 0)
+ goto errout;
}
if (tb[RTA_PREF]) {
@@ -2909,9 +2914,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RTA_ENCAP])
cfg->fc_encap = tb[RTA_ENCAP];
- if (tb[RTA_ENCAP_TYPE])
+ if (tb[RTA_ENCAP_TYPE]) {
cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+ if (err < 0)
+ goto errout;
+ }
+
if (tb[RTA_EXPIRES]) {
unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
@@ -3317,7 +3327,8 @@ static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
- lwtunnel_fill_encap(skb, rt->dst.lwtstate);
+ if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index b172d85c650a..a855eb325b03 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
val = nla_data(info->attrs[SEG6_ATTR_DST]);
t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
+ if (!t_new)
+ return -ENOMEM;
mutex_lock(&sdata->lock);
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index ef1c8a46e7ac..03a064803626 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -400,7 +400,7 @@ static int seg6_hmac_init_algo(void)
*p_tfm = tfm;
}
- p_tfm = this_cpu_ptr(algo->tfms);
+ p_tfm = raw_cpu_ptr(algo->tfms);
tfm = *p_tfm;
shsize = sizeof(*shash) + crypto_shash_descsize(tfm);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bbfca22c34ae..c46f8cbf5ab5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
#ifdef CONFIG_DST_CACHE
+ preempt_disable();
dst = dst_cache_get(&slwt->cache);
+ preempt_enable();
#endif
if (unlikely(!dst)) {
@@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
#ifdef CONFIG_DST_CACHE
+ preempt_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+ preempt_enable();
#endif
}
@@ -418,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
.fill_encap = seg6_fill_encap_info,
.get_encap_size = seg6_encap_nlsize,
.cmp_encap = seg6_encap_cmp,
+ .owner = THIS_MODULE,
};
int __init seg6_iptunnel_init(void)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 73bc8fc68acd..cb8929681dc7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -469,7 +469,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -840,7 +840,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cfb9e5f4e28f..13190b38f22e 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1044,7 +1044,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
- size_t headroom, linear;
+ size_t headroom = 0;
+ size_t linear;
struct sk_buff *skb;
struct iucv_message txmsg = {0};
struct cmsghdr *cmsg;
@@ -1122,18 +1123,20 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
* this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
- headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
- ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
- if (headroom + len < PAGE_SIZE) {
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
linear = len;
} else {
- /* In nonlinear "classic" iucv skb,
- * reserve space for iucv_array
- */
- if (iucv->transport != AF_IUCV_TRANS_HIPER)
- headroom += sizeof(struct iucv_array) *
- (MAX_SKB_FRAGS + 1);
- linear = PAGE_SIZE - headroom;
+ if (len < PAGE_SIZE) {
+ linear = len;
+ } else {
+ /* In nonlinear "classic" iucv skb,
+ * reserve space for iucv_array
+ */
+ headroom = sizeof(struct iucv_array) *
+ (MAX_SKB_FRAGS + 1);
+ linear = PAGE_SIZE - headroom;
+ }
}
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
noblock, &err, 0);
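
The af_iucv hunk restructures how headroom and linear are chosen before sock_alloc_send_pskb(): the HiperSockets transport always reserves the transport header plus an Ethernet header and keeps the message linear, while the classic transport keeps small messages fully linear and, for larger ones, reserves room for an iucv_array descriptor table and linearises only the first page. The decision tree as a stand-alone function; the two *_SIZE constants are stand-ins for the real struct sizes:

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE        4096
#define ETH_HLEN         14
#define MAX_SKB_FRAGS    17
#define TRANS_HDR_SIZE   104 /* stand-in for sizeof(struct af_iucv_trans_hdr) */
#define IUCV_ARRAY_SIZE  8   /* stand-in for sizeof(struct iucv_array) */

struct alloc_plan {
	size_t headroom;
	size_t linear;
};

static struct alloc_plan plan_send(bool hiper_transport, size_t len)
{
	struct alloc_plan p = { 0, 0 };

	if (hiper_transport) {
		/* HiperSockets: reserve transport + Ethernet header,
		 * keep everything linear. */
		p.headroom = TRANS_HDR_SIZE + ETH_HLEN;
		p.linear = len;
	} else if (len < PAGE_SIZE) {
		/* Classic transport, small message: linear, no headroom. */
		p.linear = len;
	} else {
		/* Classic transport, large message: reserve space for the
		 * iucv_array descriptors, linearise only the first page. */
		p.headroom = IUCV_ARRAY_SIZE * (MAX_SKB_FRAGS + 1);
		p.linear = PAGE_SIZE - p.headroom;
	}
	return p;
}

int main(void)
{
	struct alloc_plan p = plan_send(false, 3 * PAGE_SIZE);

	printf("headroom=%zu linear=%zu nonlinear=%zu\n",
	       p.headroom, p.linear, 3 * (size_t)PAGE_SIZE - p.linear);
	return 0;
}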
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 8938b6ba57a0..3d73278b86ca 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -47,7 +47,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
return (struct l2tp_ip_sock *)sk;
}
-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
+ __be32 raddr, int dif, u32 tunnel_id)
{
struct sock *sk;
@@ -61,6 +62,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+ (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
(!sk->sk_bound_dev_if || !dif ||
sk->sk_bound_dev_if == dif))
goto found;
@@ -71,15 +73,6 @@ found:
return sk;
}
-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
-{
- struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
- if (sk)
- sock_hold(sk);
-
- return sk;
-}
-
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
@@ -183,8 +176,8 @@ pass_up:
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
- sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
- tunnel_id);
+ sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+ inet_iif(skb), tunnel_id);
if (!sk) {
read_unlock_bh(&l2tp_ip_lock);
goto discard;
@@ -280,7 +273,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_saddr = 0; /* Use device */
write_lock_bh(&l2tp_ip_lock);
- if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+ if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
write_unlock_bh(&l2tp_ip_lock);
ret = -EADDRINUSE;
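
The extended __l2tp_ip_bind_lookup() now also matches the remote address, so a connected L2TP/IP socket only accepts frames from the peer it connected to, while bind-time callers pass 0 for raddr and keep the old behaviour. The match conditions, lifted into a self-contained predicate (stub socket struct, host-order addresses for readability; the kernel operates on __be32):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct l2tp_sock {
	uint32_t conn_id;
	uint32_t rcv_saddr;      /* bound local address, 0 = any */
	uint32_t daddr;          /* connected remote address, 0 = none */
	int      bound_dev_if;   /* 0 = not bound to a device */
};

static bool l2tp_sock_matches(const struct l2tp_sock *sk,
			      uint32_t laddr, uint32_t raddr,
			      int dif, uint32_t tunnel_id)
{
	return sk->conn_id == tunnel_id &&
	       /* local address must match unless the socket is unbound */
	       !(sk->rcv_saddr && sk->rcv_saddr != laddr) &&
	       /* remote address must match unless socket or caller has none */
	       (!sk->daddr || !raddr || sk->daddr == raddr) &&
	       /* device must match unless socket or caller is unbound */
	       (!sk->bound_dev_if || !dif || sk->bound_dev_if == dif);
}

int main(void)
{
	struct l2tp_sock sk = {
		.conn_id = 42, .rcv_saddr = 0x0a000001,
		.daddr = 0x0a000002, .bound_dev_if = 0,
	};

	/* frame from the connected peer: accepted */
	printf("%d\n", l2tp_sock_matches(&sk, 0x0a000001, 0x0a000002, 3, 42));
	/* frame from some other peer: rejected */
	printf("%d\n", l2tp_sock_matches(&sk, 0x0a000001, 0x0a000003, 3, 42));
	/* bind-time check (no remote address yet): still matches */
	printf("%d\n", l2tp_sock_matches(&sk, 0x0a000001, 0, 3, 42));
	return 0;
}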
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index f092ac441fdd..331ccf5a7bad 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
struct in6_addr *laddr,
+ const struct in6_addr *raddr,
int dif, u32 tunnel_id)
{
struct sock *sk;
sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
- const struct in6_addr *addr = inet6_rcv_saddr(sk);
+ const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+ const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
if (l2tp == NULL)
@@ -72,7 +74,8 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
- (!addr || ipv6_addr_equal(addr, laddr)) &&
+ (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
+ (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
(!sk->sk_bound_dev_if || !dif ||
sk->sk_bound_dev_if == dif))
goto found;
@@ -83,17 +86,6 @@ found:
return sk;
}
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
- struct in6_addr *laddr,
- int dif, u32 tunnel_id)
-{
- struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
- if (sk)
- sock_hold(sk);
-
- return sk;
-}
-
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
@@ -197,8 +189,8 @@ pass_up:
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
- tunnel_id);
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+ inet6_iif(skb), tunnel_id);
if (!sk) {
read_unlock_bh(&l2tp_ip6_lock);
goto discard;
@@ -330,7 +322,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rcu_read_unlock();
write_lock_bh(&l2tp_ip6_lock);
- if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
addr->l2tp_conn_id)) {
write_unlock_bh(&l2tp_ip6_lock);
err = -EADDRINUSE;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e75cbf6ecc26..a0d901d8992e 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
- if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
- continue;
-
max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
}
rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 41497b670e2b..d37ae7dc114b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -6,6 +6,7 @@
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1295,6 +1296,26 @@ static void ieee80211_iface_work(struct work_struct *work)
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_VHT) {
switch (mgmt->u.action.u.vht_group_notif.action_code) {
+ case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+ struct ieee80211_rx_status *status;
+ enum nl80211_band band;
+ u8 opmode;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ band = status->band;
+ opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+
+ if (sta)
+ ieee80211_vht_handle_opmode(sdata, sta,
+ opmode,
+ band);
+
+ mutex_unlock(&local->sta_mtx);
+ break;
+ }
case WLAN_VHT_ACTION_GROUPID_MGMT:
ieee80211_process_mu_groups(sdata, mgmt);
break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1822c77f2b1c..56fb47953b72 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
- if (sband->ht_cap.ht_supported)
- local->rx_chains =
- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
- local->rx_chains);
+ if (!sband->ht_cap.ht_supported)
+ continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
+ local->rx_chains =
+ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+ local->rx_chains);
+
+ /* no need to mask, SM_PS_DISABLED has all bits set */
+ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN */
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3e289a64ed43..3090dd4342f6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2472,7 +2472,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
- fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC);
+ fwd_skb = skb_copy_expand(skb, local->tx_headroom +
+ sdata->encrypt_headroom, 0, GFP_ATOMIC);
if (!fwd_skb) {
net_info_ratelimited("%s: failed to clone mesh frame\n",
sdata->name);
@@ -2880,17 +2881,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
case WLAN_VHT_ACTION_OPMODE_NOTIF: {
- u8 opmode;
-
/* verify opmode is present */
if (len < IEEE80211_MIN_ACTION_SIZE + 2)
goto invalid;
-
- opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
-
- ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
- opmode, status->band);
- goto handled;
+ goto queue;
}
case WLAN_VHT_ACTION_GROUPID_MGMT: {
if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@@ -3942,21 +3936,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
u64_stats_update_end(&stats->syncp);
if (fast_rx->internal_forward) {
- struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
+ struct sk_buff *xmit_skb = NULL;
+ bool multicast = is_multicast_ether_addr(skb->data);
+
+ if (multicast) {
+ xmit_skb = skb_copy(skb, GFP_ATOMIC);
+ } else if (sta_info_get(rx->sdata, skb->data)) {
+ xmit_skb = skb;
+ skb = NULL;
+ }
- if (dsta) {
+ if (xmit_skb) {
/*
* Send to wireless media and increase priority by 256
* to keep the received priority instead of
* reclassifying the frame (see cfg80211_classify8021d).
*/
- skb->priority += 256;
- skb->protocol = htons(ETH_P_802_3);
- skb_reset_network_header(skb);
- skb_reset_mac_header(skb);
- dev_queue_xmit(skb);
- return true;
+ xmit_skb->priority += 256;
+ xmit_skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(xmit_skb);
+ skb_reset_mac_header(xmit_skb);
+ dev_queue_xmit(xmit_skb);
}
+
+ if (!skb)
+ return true;
}
/* deliver to local stack */
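
The fast-rx hunk above changes intra-BSS forwarding: a multicast frame is copied back onto the wireless medium while the original still goes up the local stack, a unicast frame addressed to a known station is handed off entirely (local delivery is skipped), and anything else is delivered locally only. The decision table as a tiny model; the two booleans stand in for is_multicast_ether_addr() and a successful sta_info_get():

#include <stdbool.h>
#include <stdio.h>

enum fwd_action {
	DELIVER_LOCAL_ONLY,      /* unknown unicast destination */
	COPY_TO_AIR_AND_LOCAL,   /* multicast: forward a copy, deliver orig */
	FORWARD_TO_AIR_ONLY,     /* unicast to a known station: hand off skb */
};

static enum fwd_action fast_rx_forward(bool multicast, bool dest_is_known_sta)
{
	if (multicast)
		return COPY_TO_AIR_AND_LOCAL;
	if (dest_is_known_sta)
		return FORWARD_TO_AIR_ONLY;
	return DELIVER_LOCAL_ONLY;
}

static const char *name[] = {
	"deliver locally only",
	"copy to air, deliver original locally",
	"forward to air, skip local delivery",
};

int main(void)
{
	printf("multicast:       %s\n", name[fast_rx_forward(true, false)]);
	printf("known unicast:   %s\n", name[fast_rx_forward(false, true)]);
	printf("unknown unicast: %s\n", name[fast_rx_forward(false, false)]);
	return 0;
}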
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b6cfcf038c11..50c309094c37 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
/* This will evaluate to 1, 3, 5 or 7. */
for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
- if (ignored_acs & BIT(ac))
- continue;
+ if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
+ break;
tid = 7 - 2 * ac;
ieee80211_send_null_response(sta, tid, reason, true, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2c21b7039136..797e847cbc49 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1243,7 +1243,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
struct ieee80211_vif *vif,
- struct ieee80211_sta *pubsta,
+ struct sta_info *sta,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1257,10 +1257,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
if (!ieee80211_is_data(hdr->frame_control))
return NULL;
- if (pubsta) {
+ if (sta) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- txq = pubsta->txq[tid];
+ if (!sta->uploaded)
+ return NULL;
+
+ txq = sta->sta.txq[tid];
} else if (vif) {
txq = vif->txq;
}
@@ -1503,23 +1506,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
struct fq *fq = &local->fq;
struct ieee80211_vif *vif;
struct txq_info *txqi;
- struct ieee80211_sta *pubsta;
if (!local->ops->wake_tx_queue ||
sdata->vif.type == NL80211_IFTYPE_MONITOR)
return false;
- if (sta && sta->uploaded)
- pubsta = &sta->sta;
- else
- pubsta = NULL;
-
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
vif = &sdata->vif;
- txqi = ieee80211_get_txq(local, vif, pubsta, skb);
+ txqi = ieee80211_get_txq(local, vif, sta, skb);
if (!txqi)
return false;
@@ -3287,7 +3284,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
int hw_headroom = sdata->local->hw.extra_tx_headroom;
struct ethhdr eth;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
@@ -3351,6 +3348,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
+ info = IEEE80211_SKB_CB(skb);
memset(info, 0, sizeof(*info));
info->band = fast_tx->band;
info->control.vif = &sdata->vif;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 6832bf6ab69f..43e45bb660bc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
- if (changed > 0)
+ if (changed > 0) {
+ ieee80211_recalc_min_chandef(sdata);
rate_control_rate_update(local, sband, sta, changed);
+ }
}
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 15fe97644ffe..5b77377e5a15 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
-static u32 mpls_multipath_hash(struct mpls_route *rt,
- struct sk_buff *skb, bool bos)
+static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
struct mpls_entry_decoded dec;
+ unsigned int mpls_hdr_len = 0;
struct mpls_shim_hdr *hdr;
bool eli_seen = false;
int label_index;
u32 hash = 0;
- for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+ for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
label_index++) {
- if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+ mpls_hdr_len += sizeof(*hdr);
+ if (!pskb_may_pull(skb, mpls_hdr_len))
break;
/* Read and decode the current label */
@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
eli_seen = true;
}
- bos = dec.bos;
- if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
- sizeof(struct iphdr))) {
+ if (!dec.bos)
+ continue;
+
+ /* found bottom label; does skb have room for a header? */
+ if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
const struct iphdr *v4hdr;
- v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
- label_index);
+ v4hdr = (const struct iphdr *)(hdr + 1);
if (v4hdr->version == 4) {
hash = jhash_3words(ntohl(v4hdr->saddr),
ntohl(v4hdr->daddr),
v4hdr->protocol, hash);
} else if (v4hdr->version == 6 &&
- pskb_may_pull(skb, sizeof(*hdr) * label_index +
- sizeof(struct ipv6hdr))) {
+ pskb_may_pull(skb, mpls_hdr_len +
+ sizeof(struct ipv6hdr))) {
const struct ipv6hdr *v6hdr;
- v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
- label_index);
-
+ v6hdr = (const struct ipv6hdr *)(hdr + 1);
hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
hash = jhash_1word(v6hdr->nexthdr, hash);
}
}
+
+ break;
}
return hash;
}
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
- struct sk_buff *skb, bool bos)
+ struct sk_buff *skb)
{
int alive = ACCESS_ONCE(rt->rt_nhn_alive);
u32 hash = 0;
@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
if (alive <= 0)
return NULL;
- hash = mpls_multipath_hash(rt, skb, bos);
+ hash = mpls_multipath_hash(rt, skb);
nh_index = hash % alive;
if (alive == rt->rt_nhn)
goto out;
@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
- /* Pop the label */
- skb_pull(skb, sizeof(*hdr));
- skb_reset_network_header(skb);
-
- skb_orphan(skb);
-
rt = mpls_route_input_rcu(net, dec.label);
if (!rt)
goto drop;
- nh = mpls_select_multipath(rt, skb, dec.bos);
+ nh = mpls_select_multipath(rt, skb);
if (!nh)
goto drop;
@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
if (!mpls_output_possible(out_dev))
goto drop;
+ /* Pop the label */
+ skb_pull(skb, sizeof(*hdr));
+ skb_reset_network_header(skb);
+
+ skb_orphan(skb);
+
if (skb_warn_if_lro(skb))
goto drop;
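
The reworked mpls_multipath_hash() walks the label stack one shim header at a time, pulling data as it goes, and once the bottom-of-stack label is found hashes the inner IPv4 or IPv6 addresses and protocol; mpls_select_multipath() then takes hash % alive over the live next hops. A compact model of that selection step only; the mixing function below is a stand-in, not the kernel's jhash:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for jhash_3words(): any decent mixing function will do for
 * illustration; the kernel uses jhash.
 */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t h = 2166136261u;
	uint32_t v[3] = { a, b, c };

	for (int i = 0; i < 3; i++) {
		h ^= v[i];
		h *= 16777619u;
	}
	return h;
}

/* Hash the inner IPv4 flow once the bottom-of-stack label is found,
 * then pick one of the currently-alive next hops.
 */
static unsigned int select_nh(uint32_t saddr, uint32_t daddr, uint8_t proto,
			      unsigned int alive)
{
	uint32_t hash = mix3(saddr, daddr, proto);

	return alive ? hash % alive : 0;
}

int main(void)
{
	/* two flows between the same hosts may land on different next hops */
	printf("flow A -> nh %u\n", select_nh(0x0a000001, 0x0a000002, 6, 4));
	printf("flow B -> nh %u\n", select_nh(0x0a000001, 0x0a000003, 17, 4));
	return 0;
}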
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 2f7ccd934416..1d281c1ff7c1 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -215,6 +215,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
.fill_encap = mpls_fill_encap_info,
.get_encap_size = mpls_encap_nlsize,
.cmp_encap = mpls_encap_cmp,
+ .owner = THIS_MODULE,
};
static int __init mpls_iptunnel_init(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 63729b489c2c..bbc45f8a7b2d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -494,7 +494,7 @@ config NFT_CT
depends on NF_CONNTRACK
tristate "Netfilter nf_tables conntrack module"
help
- This option adds the "meta" expression that you can use to match
+ This option adds the "ct" expression that you can use to match
connection tracking information such as the flow state.
config NFT_SET_RBTREE
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3a073cd9fcf4..4e8083c5e01d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV 64u
-/* upper bound of scan intervals */
-#define GC_INTERVAL_MAX (2 * HZ)
-/* maximum conntracks to evict per gc run */
-#define GC_MAX_EVICTS 256u
+#define GC_MAX_BUCKETS_DIV 128u
+/* upper bound of full table scan */
+#define GC_MAX_SCAN_JIFFIES (16u * HZ)
+/* desired ratio of entries found to be expired */
+#define GC_EVICT_RATIO 50u
static struct conntrack_gc_work conntrack_gc_work;
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
static void gc_worker(struct work_struct *work)
{
+ unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
unsigned int i, goal, buckets = 0, expired_count = 0;
struct conntrack_gc_work *gc_work;
unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
*/
rcu_read_unlock();
cond_resched_rcu_qs();
- } while (++buckets < goal &&
- expired_count < GC_MAX_EVICTS);
+ } while (++buckets < goal);
if (gc_work->exiting)
return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
* 1. Minimize time until we notice a stale entry
* 2. Maximize scan intervals to not waste cycles
*
- * Normally, expired_count will be 0, this increases the next_run time
- * to priorize 2) above.
+ * Normally, expire ratio will be close to 0.
*
- * As soon as a timed-out entry is found, move towards 1) and increase
- * the scan frequency.
- * In case we have lots of evictions next scan is done immediately.
+ * As soon as a sizeable fraction of the entries have expired
+ * increase scan frequency.
*/
ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
- gc_work->next_gc_run = 0;
- next_run = 0;
- } else if (expired_count) {
- gc_work->next_gc_run /= 2U;
- next_run = msecs_to_jiffies(1);
+ if (ratio > GC_EVICT_RATIO) {
+ gc_work->next_gc_run = min_interval;
} else {
- if (gc_work->next_gc_run < GC_INTERVAL_MAX)
- gc_work->next_gc_run += msecs_to_jiffies(1);
+ unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
- next_run = gc_work->next_gc_run;
+ BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+
+ gc_work->next_gc_run += min_interval;
+ if (gc_work->next_gc_run > max)
+ gc_work->next_gc_run = max;
}
+ next_run = gc_work->next_gc_run;
gc_work->last_bucket = i;
queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
}
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
- gc_work->next_gc_run = GC_INTERVAL_MAX;
+ gc_work->next_gc_run = HZ;
gc_work->exiting = false;
}
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
conntrack_gc_work_init(&conntrack_gc_work);
- queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
+ queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
return 0;
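
The gc_worker() rework replaces the evict-count-driven rescheduling with a ratio-based scheme: if more than GC_EVICT_RATIO percent of the scanned entries had expired, the next run comes after the minimum interval (HZ / GC_MAX_BUCKETS_DIV); otherwise the interval grows by that minimum each round, capped so that a full table scan never stretches beyond GC_MAX_SCAN_JIFFIES. The rescheduling arithmetic in isolation, with HZ fixed at 250 purely for the demo:

#include <stdio.h>

#define HZ                  250u
#define GC_MAX_BUCKETS_DIV  128u
#define GC_MAX_SCAN_JIFFIES (16u * HZ)
#define GC_EVICT_RATIO      50u

static unsigned int next_gc_run(unsigned int prev_run,
				unsigned int scanned,
				unsigned int expired)
{
	unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV;
	unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
	unsigned int ratio = scanned ? expired * 100 / scanned : 0;

	if (min_interval == 0)
		min_interval = 1;

	if (ratio > GC_EVICT_RATIO)
		return min_interval;      /* lots of stale entries: rescan soon */

	prev_run += min_interval;         /* mostly fresh: back off gradually */
	return prev_run > max ? max : prev_run;
}

int main(void)
{
	unsigned int run = HZ;            /* initial interval, as in init */

	for (int i = 0; i < 5; i++) {
		run = next_gc_run(run, 1000, 5); /* 0.5% expired */
		printf("idle table:  next run in %u jiffies\n", run);
	}
	run = next_gc_run(run, 1000, 800);       /* 80% expired */
	printf("busy expiry: next run in %u jiffies\n", run);
	return 0;
}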
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90dc24ad..ffb9e8ada899 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
/* Internal logging interface, which relies on the real
LOG target modules */
-#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a019a87e58ee..1b913760f205 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
}
static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
- [NFTA_CHAIN_TABLE] = { .type = NLA_STRING },
+ [NFTA_CHAIN_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
[NFTA_CHAIN_NAME] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
}
static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
- [NFTA_RULE_TABLE] = { .type = NLA_STRING },
+ [NFTA_RULE_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_RULE_CHAIN] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_RULE_HANDLE] = { .type = NLA_U64 },
@@ -2115,7 +2117,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
* is called on error from nf_tables_newrule().
*/
expr = nft_expr_first(rule);
- while (expr->ops && expr != nft_expr_last(rule)) {
+ while (expr != nft_expr_last(rule) && expr->ops) {
nf_tables_expr_destroy(ctx, expr);
expr = nft_expr_next(expr);
}
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
}
static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
- [NFTA_SET_TABLE] = { .type = NLA_STRING },
+ [NFTA_SET_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_SET_FLAGS] = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
}
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
- [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING },
- [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING },
+ [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
+ [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
};
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
};
static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
struct nft_set_dump_args *args;
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
u8 genmask = nft_genmask_cur(net);
- const struct nft_set *set;
+ struct nft_set *set;
struct nft_set_dump_args args;
struct nft_ctx ctx;
struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
goto err5;
}
+ if (set->size &&
+ !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+ err = -ENFILE;
+ goto err6;
+ }
+
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
+err6:
+ set->ops->remove(set, &elem);
err5:
kfree(trans);
err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
return -EBUSY;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
- if (set->size &&
- !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
- return -ENFILE;
-
err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
- if (err < 0) {
- atomic_dec(&set->nelems);
+ if (err < 0)
break;
- }
}
return err;
}
@@ -3883,9 +3890,9 @@ err1:
}
static int nft_flush_set(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
struct nft_trans *trans;
int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
err = -ENOENT;
goto err1;
}
+ set->ndeact++;
- nft_trans_elem_set(trans) = (struct nft_set *)set;
- nft_trans_elem(trans) = *((struct nft_set_elem *)elem);
+ nft_trans_elem_set(trans) = set;
+ nft_trans_elem(trans) = *elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
- [NFTA_OBJ_TABLE] = { .type = NLA_STRING },
- [NFTA_OBJ_NAME] = { .type = NLA_STRING },
+ [NFTA_OBJ_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
+ [NFTA_OBJ_NAME] = { .type = NLA_STRING,
+ .len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_OBJ_DATA] = { .type = NLA_NESTED },
};
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
- if (filter->table[0] &&
+ if (filter && filter->table[0] &&
strcmp(filter->table, table->name))
goto cont;
- if (filter->type != NFT_OBJECT_UNSPEC &&
+ if (filter &&
+ filter->type != NFT_OBJECT_UNSPEC &&
obj->type->type != filter->type)
goto cont;
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
const struct nft_chain *chain);
static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
{
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
- const struct nft_set *set;
+ struct nft_set *set;
struct nft_set_binding *binding;
struct nft_set_iter iter;
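
Several policies in this file, and in nft_dynset/nft_log/nft_lookup/nft_objref below, gain a .len bound on their NLA_STRING attributes so that over-long table, chain, set and object names are rejected during netlink validation rather than copied into fixed-size buffers. For NLA_STRING the validator compares the payload length, minus a trailing NUL if present, against the policy maximum; a stand-alone model of that check (simplified, and the 32-byte limit is only an example):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Accept a string attribute only if its length (ignoring a trailing
 * NUL, if present) does not exceed the policy maximum.
 */
static bool nla_string_ok(const char *payload, size_t attrlen, size_t policy_len)
{
	if (attrlen == 0)
		return false;
	if (payload[attrlen - 1] == '\0')
		attrlen--;
	return attrlen <= policy_len;
}

int main(void)
{
	const size_t table_maxlen = 32 - 1;   /* e.g. a MAXNAMELEN - 1 bound */
	const char ok[] = "filter";
	char too_long[64];

	memset(too_long, 'x', sizeof(too_long) - 1);
	too_long[sizeof(too_long) - 1] = '\0';

	printf("'filter' accepted: %d\n",
	       nla_string_ok(ok, sizeof(ok), table_maxlen));
	printf("63-char name accepted: %d\n",
	       nla_string_ok(too_long, sizeof(too_long), table_maxlen));
	return 0;
}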
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 7de2f46734a4..049ad2d9ee66 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -98,7 +98,8 @@ out:
}
static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
- [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING },
+ [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
[NFTA_DYNSET_OP] = { .type = NLA_U32 },
[NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6271e40a3dd6..6f6e64423643 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
[NFTA_LOG_GROUP] = { .type = NLA_U16 },
- [NFTA_LOG_PREFIX] = { .type = NLA_STRING },
+ [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
+ .len = NF_LOG_PREFIXLEN - 1 },
[NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
[NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
[NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index d4f97fa7e21d..e21aea7e5ec8 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
}
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
- [NFTA_LOOKUP_SET] = { .type = NLA_STRING },
+ [NFTA_LOOKUP_SET] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
[NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
[NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 415a65ba2b85..1ae8c49ca4a1 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
}
static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
- [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING },
+ [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING,
+ .len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 },
[NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 },
- [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING },
+ [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
};
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 36d2b1096546..7d699bbd45b0 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
return 0;
}
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+ __wsum fsum, __wsum tsum, int csum_offset)
+{
+ __sum16 sum;
+
+ if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+ return -1;
+
+ nft_csum_replace(&sum, fsum, tsum);
+ if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+ skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+ return -1;
+
+ return 0;
+}
+
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
const u32 *src = &regs->data[priv->sreg];
int offset, csum_offset;
__wsum fsum, tsum;
- __sum16 sum;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
csum_offset = offset + priv->csum_offset;
offset += priv->offset;
- if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+ if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
(priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
skb->ip_summed != CHECKSUM_PARTIAL)) {
- if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
- goto err;
-
fsum = skb_checksum(skb, offset, priv->len, 0);
tsum = csum_partial(src, priv->len, 0);
- nft_csum_replace(&sum, fsum, tsum);
- if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
- skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+ if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+ nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
goto err;
if (priv->csum_flags &&
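
nft_payload_csum_inet() factors out the Internet-checksum fix-up: fsum is the ones-complement sum over the bytes being overwritten, tsum the sum over the replacement bytes, and nft_csum_replace() folds the difference into the stored 16-bit checksum so the packet need not be re-checksummed from scratch (the RFC 1624 incremental update). A self-contained demonstration on a plain buffer; the helpers below are simplified reimplementations, not the kernel's csum routines:

#include <stdint.h>
#include <stdio.h>

/* 16-bit ones-complement sum of a byte buffer (like csum_partial + fold). */
static uint16_t csum16(const uint8_t *buf, size_t len, uint32_t sum)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Incremental update, RFC 1624 eq. 3: HC' = ~(~HC + ~m + m'). */
static uint16_t csum_replace(uint16_t old_csum, uint16_t old_field, uint16_t new_field)
{
	uint32_t sum = (uint16_t)~old_csum;

	sum += (uint16_t)~old_field;
	sum += new_field;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	/* treat bytes 6..7 as the checksum field; compute it with the
	 * field zeroed, then store it */
	pkt[6] = pkt[7] = 0;
	uint16_t full = ~csum16(pkt, sizeof(pkt), 0);
	pkt[6] = full >> 8;
	pkt[7] = full & 0xff;

	/* rewrite bytes 2..3 and patch the checksum incrementally */
	uint16_t old_field = (uint16_t)(pkt[2] << 8 | pkt[3]);
	uint16_t new_field = 0xabcd;
	uint16_t patched = csum_replace((uint16_t)(pkt[6] << 8 | pkt[7]),
					old_field, new_field);
	pkt[2] = new_field >> 8;
	pkt[3] = new_field & 0xff;
	pkt[6] = pkt[7] = 0;

	uint16_t recomputed = ~csum16(pkt, sizeof(pkt), 0);
	printf("incremental %#06x, recomputed %#06x\n", patched, recomputed);
	return 0;
}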
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 3e19fa1230dc..dbb6aaff67ec 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
if (priv->queues_total > 1) {
if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
- int cpu = smp_processor_id();
+ int cpu = raw_smp_processor_id();
queue = priv->queuenum + cpu % priv->queues_total;
} else {
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index bd6efc53f26d..2d6fe3559912 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
bool reset)
{
+ u64 consumed, consumed_cap;
u32 flags = priv->flags;
- u64 consumed;
-
- if (reset) {
- consumed = atomic64_xchg(&priv->consumed, 0);
- if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
- flags |= NFT_QUOTA_F_DEPLETED;
- } else {
- consumed = atomic64_read(&priv->consumed);
- }
/* Since we inconditionally increment consumed quota for each packet
* that we see, don't go over the quota boundary in what we send to
* userspace.
*/
- if (consumed > priv->quota)
- consumed = priv->quota;
+ consumed = atomic64_read(&priv->consumed);
+ if (consumed >= priv->quota) {
+ consumed_cap = priv->quota;
+ flags |= NFT_QUOTA_F_DEPLETED;
+ } else {
+ consumed_cap = consumed;
+ }
if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
NFTA_QUOTA_PAD) ||
- nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+ nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
NFTA_QUOTA_PAD) ||
nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
goto nla_put_failure;
+
+ if (reset) {
+ atomic64_sub(consumed, &priv->consumed);
+ clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+ }
return 0;
nla_put_failure:
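
The nft_quota change makes the dump path cap the reported consumption at the quota and derive the DEPLETED flag from that comparison, and on a reset request it subtracts the snapshot it just reported instead of swapping in zero, so bytes accounted between the read and the reset are not lost. A small model of the counter logic with C11 atomics (flag handling omitted):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct quota {
	uint64_t limit;
	_Atomic uint64_t consumed;
};

/* Dump the counter (capped at the limit) and optionally reset it by
 * subtracting exactly what was reported, not by zeroing, so bytes
 * accounted concurrently with the dump are kept.
 */
static uint64_t quota_dump(struct quota *q, bool reset, bool *depleted)
{
	uint64_t consumed = atomic_load(&q->consumed);
	uint64_t reported = consumed;

	if (consumed >= q->limit) {
		reported = q->limit;
		*depleted = true;
	} else {
		*depleted = false;
	}

	if (reset)
		atomic_fetch_sub(&q->consumed, consumed);
	return reported;
}

int main(void)
{
	struct quota q = { .limit = 1000 };
	bool depleted;

	atomic_store(&q.consumed, 1500);
	printf("reported %llu depleted %d\n",
	       (unsigned long long)quota_dump(&q, true, &depleted), depleted);

	/* anything added after the atomic_load() would survive the reset;
	 * with no race in this single-threaded demo the counter is 0 */
	printf("left over after reset: %llu\n",
	       (unsigned long long)atomic_load(&q.consumed));
	return 0;
}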
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 1e20e2bbb6d9..e36069fb76ae 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
}
-static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_hash *priv = nft_set_priv(set);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 08376e50f6cd..f06f55ee516d 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
}
static void nft_rbtree_walk(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
struct nft_set_iter *iter)
{
const struct nft_rbtree *priv = nft_set_priv(set);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 28c56b95fb7f..ea7c67050792 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -1502,10 +1502,7 @@ static int __init netlbl_init(void)
printk(KERN_INFO "NetLabel: Initializing\n");
printk(KERN_INFO "NetLabel: domain hash size = %u\n",
(1 << NETLBL_DOMHSH_BITSIZE));
- printk(KERN_INFO "NetLabel: protocols ="
- " UNLABELED"
- " CIPSOv4"
- "\n");
+ printk(KERN_INFO "NetLabel: protocols = UNLABELED CIPSOv4 CALIPSO\n");
ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE);
if (ret_val != 0)
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6b78bab27755..54253ea5976e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
int hooknum, nh_off, err = NF_ACCEPT;
nh_off = skb_network_offset(skb);
- skb_pull(skb, nh_off);
+ skb_pull_rcsum(skb, nh_off);
/* See HOOK2MANIP(). */
if (maniptype == NF_NAT_MANIP_SRC)
@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
skb_push(skb, nh_off);
+ skb_postpush_rcsum(skb, skb->data, nh_off);
return err;
}
@@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
/* The conntrack module expects to be working at L3. */
nh_ofs = skb_network_offset(skb);
- skb_pull(skb, nh_ofs);
+ skb_pull_rcsum(skb, nh_ofs);
if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
err = handle_fragments(net, key, info->zone.id, skb);
@@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
err = ovs_ct_lookup(net, key, info, skb);
skb_push(skb, nh_ofs);
+ skb_postpush_rcsum(skb, skb->data, nh_ofs);
if (err)
kfree_skb(skb);
return err;
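
The conntrack hunks above switch from skb_pull()/skb_push() to the checksum-aware variants so a CHECKSUM_COMPLETE value stays valid while the L2 header is temporarily stripped: the checksum of the pulled bytes is removed on pull and added back after the push. A rough userspace analogue of that bookkeeping, using a plain byte sum instead of the Internet checksum (struct and helper names are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy "checksum": a sum of bytes.  The kernel uses csum_partial() and
 * folds carries, but the bookkeeping idea is the same. */
static uint32_t sum_bytes(const uint8_t *p, size_t len)
{
	uint32_t s = 0;

	while (len--)
		s += *p++;
	return s;
}

struct pkt {
	const uint8_t *data;	/* current start of headers */
	size_t len;
	uint32_t csum;		/* covers data..data+len */
};

static void pull_rcsum(struct pkt *pkt, size_t n)
{
	pkt->csum -= sum_bytes(pkt->data, n);	/* drop pulled bytes */
	pkt->data += n;
	pkt->len -= n;
}

static void postpush_rcsum(struct pkt *pkt, size_t n)
{
	pkt->data -= n;
	pkt->len += n;
	pkt->csum += sum_bytes(pkt->data, n);	/* re-add pushed bytes */
}

int main(void)
{
	uint8_t frame[] = { 1, 2, 3, 4, 5, 6 };
	struct pkt p = { frame, sizeof(frame), sum_bytes(frame, sizeof(frame)) };

	pull_rcsum(&p, 2);	/* strip a 2-byte "header" */
	postpush_rcsum(&p, 2);	/* restore it */
	printf("csum ok: %d\n", p.csum == sum_bytes(frame, sizeof(frame)));
	return 0;
}
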
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2d4c4d3911c0..9c62b6325f7a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(flow->sf_acts, acts);
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
- packet->protocol = flow->key.eth.type;
rcu_read_lock();
dp = get_dp_rcu(net, ovs_header->dp_ifindex);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 08aa926cd5cf..2c0a00f7f1b7 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
* Returns 0 if it encounters a non-vlan or incomplete packet.
* Returns 1 after successfully parsing vlan tag.
*/
-static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
+static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
+ bool untag_vlan)
{
struct vlan_head *vh = (struct vlan_head *)skb->data;
@@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
key_vh->tpid = vh->tpid;
- __skb_pull(skb, sizeof(struct vlan_head));
+ if (unlikely(untag_vlan)) {
+ int offset = skb->data - skb_mac_header(skb);
+ u16 tci;
+ int err;
+
+ __skb_push(skb, offset);
+ err = __skb_vlan_pop(skb, &tci);
+ __skb_pull(skb, offset);
+ if (err)
+ return err;
+ __vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
+ } else {
+ __skb_pull(skb, sizeof(struct vlan_head));
+ }
return 1;
}
@@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
key->eth.vlan.tpid = skb->vlan_proto;
} else {
/* Parse outer vlan tag in the non-accelerated case. */
- res = parse_vlan_tag(skb, &key->eth.vlan);
+ res = parse_vlan_tag(skb, &key->eth.vlan, true);
if (res <= 0)
return res;
}
/* Parse inner vlan tag. */
- res = parse_vlan_tag(skb, &key->eth.cvlan);
+ res = parse_vlan_tag(skb, &key->eth.cvlan, false);
if (res <= 0)
return res;
@@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
if (err)
return err;
- if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
- /* key_extract assumes that skb->protocol is set-up for
- * layer 3 packets which is the case for other callers,
- * in particular packets recieved from the network stack.
- * Here the correct value can be set from the metadata
- * extracted above.
- */
- skb->protocol = key->eth.type;
- } else {
- struct ethhdr *eth;
-
- skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
-
- /* Normally, setting the skb 'protocol' field would be
- * handled by a call to eth_type_trans(), but it assumes
- * there's a sending device, which we may not have.
- */
- if (eth_proto_is_802_3(eth->h_proto))
- skb->protocol = eth->h_proto;
- else
- skb->protocol = htons(ETH_P_802_2);
- }
+ /* key_extract assumes that skb->protocol is set-up for
+ * layer 3 packets which is the case for other callers,
+ * in particular packets received from the network stack.
+ * Here the correct value can be set from the metadata
+ * extracted above.
+ * For L2 packet key eth type would be zero. skb protocol
+ * would be set to correct value later during key-extract.
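placeholder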
+ */
+ skb->protocol = key->eth.type;
return key_extract(skb, key);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b9e1a13b4ba3..3d555c79a7b5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1976,7 +1976,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
return -EINVAL;
*len -= sizeof(vnet_hdr);
- if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le()))
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
return -EINVAL;
return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2237,7 +2237,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->has_vnet_hdr) {
if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
- vio_le())) {
+ vio_le(), true)) {
spin_lock(&sk->sk_receive_queue.lock);
goto drop_n_account;
}
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c985ecbe9bd6..ae5ac175b2be 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
const int pkt_len = 20;
struct qrtr_hdr *hdr;
struct sk_buff *skb;
- u32 *buf;
+ __le32 *buf;
skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
hdr->dst_node_id = cpu_to_le32(dst_node);
hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
- buf = (u32 *)skb_put(skb, pkt_len);
+ buf = (__le32 *)skb_put(skb, pkt_len);
memset(buf, 0, pkt_len);
buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
buf[1] = cpu_to_le32(src_node);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2095c83ce773..e10456ef6f7a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -900,8 +900,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
- if (event == RTM_GETACTION)
- act->tcfa_refcnt++;
list_add_tail(&act->list, &actions);
}
@@ -914,7 +912,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
return ret;
}
err:
- tcf_action_destroy(&actions, 0);
+ if (event != RTM_GETACTION)
+ tcf_action_destroy(&actions, 0);
return ret;
}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1c60317f0121..520baa41cba3 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
- nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
- sizeof(prog->filter->digest));
+ nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
- memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+ memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3fbba79a4ef0..1ecdf809b5fa 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
unsigned long cl;
unsigned long fh;
int err;
- int tp_created = 0;
+ int tp_created;
if ((n->nlmsg_type != RTM_GETTFILTER) &&
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
replay:
+ tp_created = 0;
+
err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
if (err < 0)
return err;
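
The cls_api fix moves the tp_created initialisation under the replay: label so every retry starts with clean per-attempt state and the error path cannot destroy a proto it did not create on that pass. A toy sketch of why state tracking what an attempt allocated must be reset at the top of a retry loop (all names invented):

#include <stdbool.h>
#include <stdio.h>

/* Pretend the first attempt allocates something and then fails with
 * -EAGAIN; the retry succeeds without allocating anything. */
static int attempt(int try_no, bool *created)
{
	if (try_no == 0) {
		*created = true;
		return -11;		/* -EAGAIN */
	}
	return 0;
}

int main(void)
{
	bool created;
	int try_no = 0, err;

replay:
	created = false;	/* reset per-attempt state on every pass */
	err = attempt(try_no++, &created);
	if (err == -11)
		goto replay;

	/* Without the reset above, 'created' would still be true here and
	 * an error path could tear down an object this pass never made. */
	printf("err=%d created=%d\n", err, created);
	return 0;
}
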
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index adc776048d1a..d9c97018317d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
- nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
+ nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
- memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+ memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 333f8e268431..970db7a41684 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -153,10 +153,14 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
switch (ip_tunnel_info_af(info)) {
case AF_INET:
+ skb_key.enc_control.addr_type =
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS;
skb_key.enc_ipv4.src = key->u.ipv4.src;
skb_key.enc_ipv4.dst = key->u.ipv4.dst;
break;
case AF_INET6:
+ skb_key.enc_control.addr_type =
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS;
skb_key.enc_ipv6.src = key->u.ipv6.src;
skb_key.enc_ipv6.dst = key->u.ipv6.dst;
break;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 5ed8e79bf102..64dfd35ccdcc 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
rcu_read_lock();
- res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+ res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+ np->tclass);
rcu_read_unlock();
return res;
}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0cca69..4f5a2b580aa5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
goto out;
}
- segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+ segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
if (IS_ERR(segs))
goto out;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e54082699520..34efaa4ef2f6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
(new_transport->state == SCTP_PF)))
new_transport = asoc->peer.active_path;
if (new_transport->state == SCTP_UNCONFIRMED) {
- WARN_ONCE(1, "Atempt to send packet on unconfirmed path.");
+ WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
sctp_chunk_fail(chunk, 0);
sctp_chunk_free(chunk);
continue;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 318c6786d653..37eeab7899fc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
- struct sctp_transport *transport;
+ struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
+ struct sctp_transport *transport;
+
+ if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
diff --git a/net/socket.c b/net/socket.c
index 8487bf136e5c..0758e13754e2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -533,11 +533,11 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
return used;
}
-int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
int err = simple_setattr(dentry, iattr);
- if (!err) {
+ if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
sock->sk->sk_uid = iattr->ia_uid;
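
The sockfs_setattr() change only copies the new owner into sk_uid when the caller actually requested a UID change, i.e. when ATTR_UID is set in ia_valid, rather than on every successful setattr. Checking the validity bitmask before consuming a field is the general pattern; a small standalone sketch (the field and flag names are invented, not the VFS ones):

#include <stdio.h>

#define ATTR_MODE	0x1
#define ATTR_UID	0x2

struct attr_req {
	unsigned int valid;	/* which of the fields below are meaningful */
	unsigned int uid;
	unsigned int mode;
};

struct obj {
	unsigned int uid;
	unsigned int mode;
};

static void apply_attrs(struct obj *o, const struct attr_req *req)
{
	if (req->valid & ATTR_MODE)
		o->mode = req->mode;
	if (req->valid & ATTR_UID)	/* only consume uid when asked to */
		o->uid = req->uid;
}

int main(void)
{
	struct obj o = { .uid = 1000, .mode = 0644 };
	struct attr_req req = { .valid = ATTR_MODE, .mode = 0600 };	/* uid not set */

	apply_attrs(&o, &req);
	printf("uid=%u mode=%o\n", o.uid, o.mode);	/* uid stays 1000 */
	return 0;
}
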
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 886e9d381771..153082598522 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
case RPC_GSS_PROC_DESTROY:
if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
goto auth_err;
- rsci->h.expiry_time = get_seconds();
+ rsci->h.expiry_time = seconds_since_boot();
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
if (resv->iov_len + 4 > PAGE_SIZE)
goto drop;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 1efbe48e794f..1dc9f3bac099 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -336,6 +336,11 @@ out:
static DEFINE_IDA(rpc_clids);
+void rpc_cleanup_clids(void)
+{
+ ida_destroy(&rpc_clids);
+}
+
static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
int clid;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index d1c330a7953a..c73de181467a 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -119,6 +119,7 @@ out:
static void __exit
cleanup_sunrpc(void)
{
+ rpc_cleanup_clids();
rpcauth_remove_module();
cleanup_socket_xprt();
svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3bc1d61694cb..9c9db55a0c1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
dprintk("svc_recv: found XPT_CLOSE\n");
+ if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
+ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
svc_delete_xprt(xprt);
/* Leave XPT_BUSY set on the dead xprt: */
goto out;
@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
le = to_be_closed.next;
list_del_init(le);
xprt = list_entry(le, struct svc_xprt, xpt_list);
- dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
- xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
- svc_close_xprt(xprt);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
+ dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
+ xprt);
+ svc_xprt_enqueue(xprt);
}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 57d35fbb1c28..172b537f8cfc 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -347,8 +347,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
atomic_inc(&rdma_stat_read);
return ret;
err:
- ib_dma_unmap_sg(xprt->sc_cm_id->device,
- frmr->sg, frmr->sg_nents, frmr->direction);
svc_rdma_put_context(ctxt, 0);
svc_rdma_put_frmr(xprt, frmr);
return ret;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 6b109a808d4c..02462d67d191 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
- rskb = tipc_buf_acquire(MAX_H_SIZE);
+ rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!rskb)
return;
tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
- req->buf = tipc_buf_acquire(MAX_H_SIZE);
+ req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!req->buf) {
kfree(req);
return -ENOMEM;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index bda89bf9f4ff..4e8647aef01c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1395,7 +1395,7 @@ tnl:
msg_set_seqno(hdr, seqno++);
pktlen = msg_size(hdr);
msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
- tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
if (!tnlskb) {
pr_warn("%sunable to send packet\n", link_co_err);
return;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index a22be502f1bd..ab02d0742476 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
-struct sk_buff *tipc_buf_acquire(u32 size)
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
- skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+ skb = alloc_skb_fclone(buf_size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
@@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
struct tipc_msg *msg;
struct sk_buff *buf;
- buf = tipc_buf_acquire(hdr_sz + data_sz);
+ buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
if (unlikely(!buf))
return NULL;
@@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
- skb = tipc_buf_acquire(msz);
+ skb = tipc_buf_acquire(msz, GFP_KERNEL);
if (unlikely(!skb))
return -ENOMEM;
skb_orphan(skb);
@@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
msg_set_importance(&pkthdr, msg_importance(mhdr));
/* Prepare first fragment */
- skb = tipc_buf_acquire(pktmax);
+ skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_orphan(skb);
@@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
pktsz = drem + INT_H_SIZE;
else
pktsz = pktmax;
- skb = tipc_buf_acquire(pktsz);
+ skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
if (!skb) {
rc = -ENOMEM;
goto error;
@@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
if (msz > (max / 2))
return false;
- _skb = tipc_buf_acquire(max);
+ _skb = tipc_buf_acquire(max, GFP_ATOMIC);
if (!_skb)
return false;
@@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
/* Never return SHORT header; expand by replacing buffer if necessary */
if (msg_short(hdr)) {
- *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+ *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
if (!*skb)
goto exit;
memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 8d408612ffa4..2c3dc38abf9c 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
-struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
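
The TIPC hunks thread a gfp_t argument through tipc_buf_acquire() so process-context callers such as the message build path can allocate with GFP_KERNEL while softirq-context callers keep GFP_ATOMIC. A hedged userspace-style sketch of pushing the allocation-flags decision out to the call sites (the flag values and helpers below are stand-ins, not kernel definitions):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL	0x1	/* may sleep: process context */
#define GFP_ATOMIC	0x2	/* must not sleep: softirq/irq context */

/* The allocator takes the flags instead of hard-coding GFP_ATOMIC, so the
 * decision is made where the calling context is actually known. */
static void *buf_acquire(size_t size, gfp_t gfp)
{
	(void)gfp;		/* a real allocator would behave differently */
	return calloc(1, size);
}

static void *build_message(size_t size)		/* process context */
{
	return buf_acquire(size, GFP_KERNEL);
}

static void *build_response(size_t size)	/* called from the rx path */
{
	return buf_acquire(size, GFP_ATOMIC);
}

int main(void)
{
	void *a = build_message(64);
	void *b = build_response(64);

	printf("%p %p\n", a, b);
	free(a);
	free(b);
	return 0;
}
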
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c1cfd92de17a..23f8899e0f8c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
+ struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
struct tipc_msg *msg;
if (buf != NULL) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2b08ab..27753325e06e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
write_lock_bh(&n->lock);
}
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+ write_unlock_bh(&n->lock);
+}
+
static void tipc_node_write_unlock(struct tipc_node *n)
{
struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
}
tipc_node_write_lock(n);
list_add_tail(subscr, &n->publ_list);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
}
tipc_node_write_lock(n);
list_del_init(subscr);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849ce453d..3cd6402e812c 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,12 +86,12 @@ struct outqueue_entry {
static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct sockaddr_tipc *saddr = con->server->saddr;
+ struct tipc_server *s = con->server;
+ struct sockaddr_tipc *saddr = s->saddr;
struct socket *sock = con->sock;
struct sock *sk;
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
}
saddr->scope = -TIPC_NODE_SCOPE;
kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
- tipc_sock_release(con);
sock_release(sock);
con->sock = NULL;
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
}
tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
- if (con)
+ if (con && test_bit(CF_CONNECTED, &con->flags))
conn_get(con);
+ else
+ con = NULL;
spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
write_unlock_bh(&sk->sk_callback_lock);
}
-static void tipc_sock_release(struct tipc_conn *con)
-{
- struct tipc_server *s = con->server;
-
- if (con->conid)
- s->tipc_conn_release(con->conid, con->usr_data);
-
- tipc_unregister_callbacks(con);
-}
-
static void tipc_close_conn(struct tipc_conn *con)
{
struct tipc_server *s = con->server;
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+ tipc_unregister_callbacks(con);
- spin_lock_bh(&s->idr_lock);
- idr_remove(&s->conn_idr, con->conid);
- s->idr_in_use--;
- spin_unlock_bh(&s->idr_lock);
+ if (con->conid)
+ s->tipc_conn_release(con->conid, con->usr_data);
/* We shouldn't flush pending works as we may be in the
* thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
if (!con)
return -EINVAL;
+ if (!test_bit(CF_CONNECTED, &con->flags)) {
+ conn_put(con);
+ return 0;
+ }
+
e = tipc_alloc_entry(data, len);
if (!e) {
conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags)) {
- if (!queue_work(s->send_wq, &con->swork))
- conn_put(con);
- } else {
+ if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
- }
return 0;
}
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
int ret;
spin_lock_bh(&con->outqueue_lock);
- while (1) {
+ while (test_bit(CF_CONNECTED, &con->flags)) {
e = list_entry(con->outqueue.next, struct outqueue_entry,
list);
if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
void tipc_server_stop(struct tipc_server *s)
{
struct tipc_conn *con;
- int total = 0;
int id;
spin_lock_bh(&s->idr_lock);
- for (id = 0; total < s->idr_in_use; id++) {
+ for (id = 0; s->idr_in_use; id++) {
con = idr_find(&s->conn_idr, id);
if (con) {
- total++;
spin_unlock_bh(&s->idr_lock);
tipc_close_conn(con);
spin_lock_bh(&s->idr_lock);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 333c5dae0072..800caaa699a1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -441,15 +441,19 @@ static void __tipc_shutdown(struct socket *sock, int error)
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (TIPC_SKB_CB(skb)->bytes_read) {
kfree_skb(skb);
- } else {
- if (!tipc_sk_type_connectionless(sk) &&
- sk->sk_state != TIPC_DISCONNECTING) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- }
- tipc_sk_respond(sk, skb, error);
+ continue;
+ }
+ if (!tipc_sk_type_connectionless(sk) &&
+ sk->sk_state != TIPC_DISCONNECTING) {
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
}
+ tipc_sk_respond(sk, skb, error);
}
+
+ if (tipc_sk_type_connectionless(sk))
+ return;
+
if (sk->sk_state != TIPC_DISCONNECTING) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
@@ -457,10 +461,8 @@ static void __tipc_shutdown(struct socket *sock, int error)
tsk->portid, error);
if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
- if (!tipc_sk_type_connectionless(sk)) {
- tipc_node_remove_conn(net, dnode, tsk->portid);
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- }
+ tipc_node_remove_conn(net, dnode, tsk->portid);
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
}
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd02244e21d..9d94e65d0894 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
static void tipc_subscrp_delete(struct tipc_subscription *sub);
static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
/**
* htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
{
struct tipc_name_seq seq;
+ tipc_subscrp_get(sub);
tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
node);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
- struct tipc_subscriber *subscriber = sub->subscriber;
/* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT, 0, 0);
- spin_lock_bh(&subscriber->lock);
- tipc_subscrp_delete(sub);
- spin_unlock_bh(&subscriber->lock);
-
- tipc_subscrb_put(subscriber);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrb_kref_release(struct kref *kref)
{
- struct tipc_subscriber *subcriber = container_of(kref,
- struct tipc_subscriber, kref);
-
- kfree(subcriber);
+ kfree(container_of(kref, struct tipc_subscriber, kref));
}
static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
kref_get(&subscriber->kref);
}
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+ struct tipc_subscription *sub = container_of(kref,
+ struct tipc_subscription,
+ kref);
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ struct tipc_subscriber *subscriber = sub->subscriber;
+
+ spin_lock_bh(&subscriber->lock);
+ tipc_nametbl_unsubscribe(sub);
+ list_del(&sub->subscrp_list);
+ atomic_dec(&tn->subscription_count);
+ spin_unlock_bh(&subscriber->lock);
+ kfree(sub);
+ tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+ kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+ kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+ struct tipc_subscr *s)
+{
+ struct list_head *subscription_list = &subscriber->subscrp_list;
+ struct tipc_subscription *sub, *temp;
+
+ spin_lock_bh(&subscriber->lock);
+ list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
+ if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+ continue;
+
+ tipc_subscrp_get(sub);
+ spin_unlock_bh(&subscriber->lock);
+ tipc_subscrp_delete(sub);
+ tipc_subscrp_put(sub);
+ spin_lock_bh(&subscriber->lock);
+
+ if (s)
+ break;
+ }
+ spin_unlock_bh(&subscriber->lock);
+}
+
static struct tipc_subscriber *tipc_subscrb_create(int conid)
{
struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
pr_warn("Subscriber rejected, no memory\n");
return NULL;
}
- kref_init(&subscriber->kref);
INIT_LIST_HEAD(&subscriber->subscrp_list);
+ kref_init(&subscriber->kref);
subscriber->conid = conid;
spin_lock_init(&subscriber->lock);
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Destroy any existing subscriptions for subscriber */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- }
- spin_unlock_bh(&subscriber->lock);
-
+ tipc_subscrb_subscrp_delete(subscriber, NULL);
tipc_subscrb_put(subscriber);
}
static void tipc_subscrp_delete(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- kfree(sub);
- atomic_dec(&tn->subscription_count);
+ if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_cancel(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Find first matching subscription, exit if not found */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) ||
- del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- break;
- }
- }
- spin_unlock_bh(&subscriber->lock);
+ tipc_subscrb_subscrp_delete(subscriber, s);
}
static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(*s));
atomic_inc(&tn->subscription_count);
+ kref_init(&sub->kref);
return sub;
}
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
spin_lock_bh(&subscriber->lock);
list_add(&sub->subscrp_list, &subscriber->subscrp_list);
- tipc_subscrb_get(subscriber);
sub->subscriber = subscriber;
tipc_nametbl_subscribe(sub);
+ tipc_subscrb_get(subscriber);
spin_unlock_bh(&subscriber->lock);
+ setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
timeout = htohl(sub->evt.s.timeout, swap);
- if (timeout == TIPC_WAIT_FOREVER)
- return;
- setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
- mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ if (timeout != TIPC_WAIT_FOREVER)
+ mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
}
/* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103082c9..ffdc214c117a 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
* @evt: template for events generated by subscription
*/
struct tipc_subscription {
+ struct kref kref;
struct tipc_subscriber *subscriber;
struct net *net;
struct timer_list timer;
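
The subscr.c rework above hangs the subscription's teardown off a kref: every path that touches a subscription takes a reference first and drops it when done, and the final put runs the release function that unsubscribes, unlinks and frees it. A minimal sketch of that get/put/release discipline with C11 atomics (this is not the kernel's struct kref, just the shape of it):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sub {
	_Atomic int refs;
	int id;
};

static void sub_release(struct sub *s)
{
	/* last reference gone: safe to unlink and free */
	printf("releasing sub %d\n", s->id);
	free(s);
}

static void sub_get(struct sub *s)
{
	atomic_fetch_add(&s->refs, 1);
}

static void sub_put(struct sub *s)
{
	if (atomic_fetch_sub(&s->refs, 1) == 1)
		sub_release(s);
}

static struct sub *sub_create(int id)
{
	struct sub *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	atomic_init(&s->refs, 1);	/* the creator holds the first reference */
	s->id = id;
	return s;
}

int main(void)
{
	struct sub *s = sub_create(7);

	if (!s)
		return 1;
	sub_get(s);	/* e.g. a timer or event path takes a reference */
	sub_put(s);	/* ...and drops it when it is done */
	sub_put(s);	/* the creator's reference: triggers the release */
	return 0;
}
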
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 127656ebe7be..cef79873b09d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
+ struct path path = { NULL, NULL };
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
+ if (sun_path[0]) {
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current_umask());
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ goto out;
+ }
+ }
+
err = mutex_lock_interruptible(&u->bindlock);
if (err)
- goto out;
+ goto out_put;
err = -EINVAL;
if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- struct path path;
- umode_t mode = S_IFSOCK |
- (SOCK_INODE(sock)->i_mode & ~current_umask());
- err = unix_mknod(sun_path, mode, &path);
- if (err) {
- if (err == -EEXIST)
- err = -EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
- }
addr->hash = UNIX_HASH_SIZE;
hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->bindlock);
+out_put:
+ if (err)
+ path_put(&path);
out:
return err;
}
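
The unix_bind() change creates the filesystem node before taking u->bindlock and releases the path again if anything after the lock fails, instead of calling the sleeping, lock-taking mknod while the bind lock is held. The shape is: do the fallible setup first, take the lock only for the bookkeeping, and undo the setup on any later error. A small sketch under those assumptions (names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bindlock = PTHREAD_MUTEX_INITIALIZER;

static int make_node(int *node)		/* may sleep, may fail */
{
	*node = 42;
	return 0;
}

static void put_node(int *node)		/* undo make_node() */
{
	*node = 0;
}

static int bind_addr(int want_node)
{
	int node = 0, err = 0;

	if (want_node) {
		err = make_node(&node);	/* done before taking the lock */
		if (err)
			return err;
	}

	pthread_mutex_lock(&bindlock);
	/* ...bind bookkeeping that may still set err... */
	pthread_mutex_unlock(&bindlock);

	if (err && want_node)
		put_node(&node);	/* release the node on any later error */
	return err;
}

int main(void)
{
	printf("bind: %d\n", bind_addr(1));
	return 0;
}
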
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3df85a751a85..5c1b267e22be 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4615,6 +4615,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
break;
}
+ /*
+ * Older kernel versions ignored this attribute entirely, so don't
+ * reject attempts to update it but mark it as unused instead so the
+ * driver won't look at the data.
+ */
+ if (statype != CFG80211_STA_AP_CLIENT_UNASSOC &&
+ statype != CFG80211_STA_TDLS_PEER_SETUP)
+ params->opmode_notif_used = false;
+
return 0;
}
EXPORT_SYMBOL(cfg80211_check_station_change);
@@ -4854,6 +4863,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
params.local_pm = pm;
}
+ if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
+ params.opmode_notif_used = true;
+ params.opmode_notif =
+ nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
+ }
+
/* Include parameters for TDLS peer (will check later) */
err = nl80211_set_station_tdls(info, &params);
if (err)
@@ -14502,13 +14517,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
bool schedule_destroy_work = false;
- bool schedule_scan_stop = false;
struct cfg80211_sched_scan_request *sched_scan_req =
rcu_dereference(rdev->sched_scan_req);
if (sched_scan_req && notify->portid &&
- sched_scan_req->owner_nlportid == notify->portid)
- schedule_scan_stop = true;
+ sched_scan_req->owner_nlportid == notify->portid) {
+ sched_scan_req->owner_nlportid = 0;
+
+ if (rdev->ops->sched_scan_stop &&
+ rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+ schedule_work(&rdev->sched_scan_stop_wk);
+ }
list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -14539,12 +14558,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
spin_unlock(&rdev->destroy_list_lock);
schedule_work(&rdev->destroy_work);
}
- } else if (schedule_scan_stop) {
- sched_scan_req->owner_nlportid = 0;
-
- if (rdev->ops->sched_scan_stop &&
- rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
- schedule_work(&rdev->sched_scan_stop_wk);
}
}
diff --git a/samples/Kconfig b/samples/Kconfig
index a6d2a43bbf2e..b124f62ed6cb 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -105,4 +105,11 @@ config SAMPLE_BLACKFIN_GPTIMERS
help
Build samples of blackfin gptimers sample module.
+config SAMPLE_VFIO_MDEV_MTTY
+ tristate "Build VFIO mtty example mediated device sample code -- loadable modules only"
+ depends on VFIO_MDEV_DEVICE && m
+ help
+ Build a virtual tty sample driver for use as a VFIO
+ mediated device
+
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index e17d66d77f09..86a137e451d9 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -2,4 +2,5 @@
obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \
hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
- configfs/ connector/ v4l/ trace_printk/ blackfin/
+ configfs/ connector/ v4l/ trace_printk/ blackfin/ \
+ vfio-mdev/
diff --git a/samples/bpf/sock_example.h b/samples/bpf/sock_example.h
index 09f7fe7e5fd7..d8014065d479 100644
--- a/samples/bpf/sock_example.h
+++ b/samples/bpf/sock_example.h
@@ -4,7 +4,7 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <net/ethernet.h>
+#include <linux/if_ether.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
index 92a44729dbe4..7ef2a12b25b2 100644
--- a/samples/bpf/tc_l2_redirect_kern.c
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
index f4fa6af22def..ccca1e348017 100644
--- a/samples/bpf/trace_output_user.c
+++ b/samples/bpf/trace_output_user.c
@@ -9,7 +9,6 @@
#include <string.h>
#include <fcntl.h>
#include <poll.h>
-#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <errno.h>
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
index 85c38ecd3a2d..0f4f6e8c8611 100644
--- a/samples/bpf/xdp_tx_iptunnel_kern.c
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -8,6 +8,7 @@
* encapsulating the incoming packet in an IPv4/v6 header
* and then XDP_TX it out.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
diff --git a/samples/vfio-mdev/Makefile b/samples/vfio-mdev/Makefile
index a932edbe38eb..cbbd868a50a8 100644
--- a/samples/vfio-mdev/Makefile
+++ b/samples/vfio-mdev/Makefile
@@ -1,13 +1 @@
-#
-# Makefile for mtty.c file
-#
-KERNEL_DIR:=/lib/modules/$(shell uname -r)/build
-
-obj-m:=mtty.o
-
-modules clean modules_install:
- $(MAKE) -C $(KERNEL_DIR) SUBDIRS=$(PWD) $@
-
-default: modules
-
-module: modules
+obj-$(CONFIG_SAMPLE_VFIO_MDEV_MTTY) += mtty.o
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index 6b633a4ea333..ca495686b9c3 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -164,7 +164,7 @@ static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
struct mdev_state *mds;
list_for_each_entry(mds, &mdev_devices_list, next) {
- if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0)
+ if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
return mds;
}
@@ -341,7 +341,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
pr_err("Serial port %d: Fifo level trigger\n",
index);
#endif
- mtty_trigger_interrupt(mdev_state->mdev->uuid);
+ mtty_trigger_interrupt(
+ mdev_uuid(mdev_state->mdev));
}
} else {
#if defined(DEBUG_INTR)
@@ -355,7 +356,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
*/
if (mdev_state->s[index].uart_reg[UART_IER] &
UART_IER_RLSI)
- mtty_trigger_interrupt(mdev_state->mdev->uuid);
+ mtty_trigger_interrupt(
+ mdev_uuid(mdev_state->mdev));
}
mutex_unlock(&mdev_state->rxtx_lock);
break;
@@ -374,7 +376,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
pr_err("Serial port %d: IER_THRI write\n",
index);
#endif
- mtty_trigger_interrupt(mdev_state->mdev->uuid);
+ mtty_trigger_interrupt(
+ mdev_uuid(mdev_state->mdev));
}
mutex_unlock(&mdev_state->rxtx_lock);
@@ -445,7 +448,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
#if defined(DEBUG_INTR)
pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
- mtty_trigger_interrupt(mdev_state->mdev->uuid);
+ mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
}
if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
@@ -453,7 +456,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
#if defined(DEBUG_INTR)
pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
- mtty_trigger_interrupt(mdev_state->mdev->uuid);
+ mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
}
break;
@@ -504,7 +507,8 @@ static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
#endif
if (mdev_state->s[index].uart_reg[UART_IER] &
UART_IER_THRI)
- mtty_trigger_interrupt(mdev_state->mdev->uuid);
+ mtty_trigger_interrupt(
+ mdev_uuid(mdev_state->mdev));
}
mutex_unlock(&mdev_state->rxtx_lock);
@@ -734,7 +738,7 @@ int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
for (i = 0; i < 2; i++) {
snprintf(name, MTTY_STRING_LEN, "%s-%d",
- dev_driver_string(mdev->parent->dev), i + 1);
+ dev_driver_string(mdev_parent_dev(mdev)), i + 1);
if (!strcmp(kobj->name, name)) {
nr_ports = i + 1;
break;
@@ -1069,7 +1073,7 @@ int mtty_get_region_info(struct mdev_device *mdev,
{
unsigned int size = 0;
struct mdev_state *mdev_state;
- int bar_index;
+ u32 bar_index;
if (!mdev)
return -EINVAL;
@@ -1078,8 +1082,11 @@ int mtty_get_region_info(struct mdev_device *mdev,
if (!mdev_state)
return -EINVAL;
- mutex_lock(&mdev_state->ops_lock);
bar_index = region_info->index;
+ if (bar_index >= VFIO_PCI_NUM_REGIONS)
+ return -EINVAL;
+
+ mutex_lock(&mdev_state->ops_lock);
switch (bar_index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
@@ -1176,7 +1183,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
memcpy(&mdev_state->dev_info, &info, sizeof(info));
- return copy_to_user((void __user *)arg, &info, minsz);
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
@@ -1197,7 +1207,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &info, minsz);
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
@@ -1217,10 +1230,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (ret)
return ret;
- if (info.count == -1)
- return -EINVAL;
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return 0;
}
case VFIO_DEVICE_SET_IRQS:
{
@@ -1298,10 +1311,8 @@ static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = to_mdev_device(dev);
-
- if (mdev)
- return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
+ if (mdev_from_dev(dev))
+ return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
return sprintf(buf, "\n");
}
@@ -1402,7 +1413,7 @@ struct attribute_group *mdev_type_groups[] = {
NULL,
};
-struct parent_ops mdev_fops = {
+struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
.dev_attr_groups = mtty_dev_groups,
.mdev_attr_groups = mdev_dev_groups,
@@ -1447,6 +1458,7 @@ static int __init mtty_dev_init(void)
if (IS_ERR(mtty_dev.vd_class)) {
pr_err("Error: failed to register mtty_dev class\n");
+ ret = PTR_ERR(mtty_dev.vd_class);
goto failed1;
}
@@ -1458,7 +1470,8 @@ static int __init mtty_dev_init(void)
if (ret)
goto failed2;
- if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0)
+ ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
+ if (ret)
goto failed3;
mutex_init(&mdev_list_lock);
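
Several mtty ioctl paths above stop returning the raw result of copy_to_user(), which is the number of bytes left uncopied, and instead map any nonzero result to -EFAULT. A tiny sketch of that convention with a stand-in copy routine (copy_to_user itself is a kernel API, so it is faked here):

#include <stdio.h>
#include <string.h>

#define EFAULT	14

/* Stand-in for copy_to_user(): returns the number of bytes NOT copied,
 * zero on success; that count must never be handed back to userspace. */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static long get_info_ioctl(void *user_buf)
{
	int info = 1234;

	if (fake_copy_to_user(user_buf, &info, sizeof(info)))
		return -EFAULT;		/* not the leftover byte count */
	return 0;
}

int main(void)
{
	int out;
	long ret = get_info_ioctl(&out);

	printf("ioctl -> %ld, info=%d\n", ret, out);
	return 0;
}
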
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 950fd2e64bb7..12262c0cc691 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -39,6 +39,9 @@
#include "hash-map.h"
#endif
+#if BUILDING_GCC_VERSION >= 7000
+#include "memmodel.h"
+#endif
#include "emit-rtl.h"
#include "debug.h"
#include "target.h"
@@ -91,6 +94,9 @@
#include "tree-ssa-alias.h"
#include "tree-ssa.h"
#include "stringpool.h"
+#if BUILDING_GCC_VERSION >= 7000
+#include "tree-vrp.h"
+#endif
#include "tree-ssanames.h"
#include "print-tree.h"
#include "tree-eh.h"
@@ -287,6 +293,22 @@ static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct c
return NULL;
}
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+ cgraph_node_ptr alias;
+
+ if (callback(node, data))
+ return true;
+
+ for (alias = node->same_body; alias; alias = alias->next) {
+ if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE)
+ if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable))
+ return true;
+ }
+
+ return false;
+}
+
#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
for ((node) = cgraph_first_function_with_gimple_body(); (node); \
(node) = cgraph_next_function_with_gimple_body(node))
@@ -399,6 +421,7 @@ typedef union gimple_statement_d gassign;
typedef union gimple_statement_d gcall;
typedef union gimple_statement_d gcond;
typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d ggoto;
typedef union gimple_statement_d gphi;
typedef union gimple_statement_d greturn;
@@ -452,6 +475,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
return stmt;
}
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+ return stmt;
+}
+
static inline gphi *as_a_gphi(gimple stmt)
{
return stmt;
@@ -496,6 +529,14 @@ static inline const greturn *as_a_const_greturn(const_gimple stmt)
typedef struct rtx_def rtx_insn;
+static inline const char *get_decl_section_name(const_tree decl)
+{
+ if (DECL_SECTION_NAME(decl) == NULL_TREE)
+ return NULL;
+
+ return TREE_STRING_POINTER(DECL_SECTION_NAME(decl));
+}
+
static inline void set_decl_section_name(tree node, const char *value)
{
if (value)
@@ -511,6 +552,7 @@ typedef struct gimple_statement_base gassign;
typedef struct gimple_statement_call gcall;
typedef struct gimple_statement_base gcond;
typedef struct gimple_statement_base gdebug;
+typedef struct gimple_statement_base ggoto;
typedef struct gimple_statement_phi gphi;
typedef struct gimple_statement_base greturn;
@@ -564,6 +606,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
return stmt;
}
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+ return stmt;
+}
+
static inline gphi *as_a_gphi(gimple stmt)
{
return as_a<gphi>(stmt);
@@ -611,6 +663,11 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
#define INSN_DELETED_P(insn) (insn)->deleted()
+static inline const char *get_decl_section_name(const_tree decl)
+{
+ return DECL_SECTION_NAME(decl);
+}
+
/* symtab/cgraph related */
#define debug_cgraph_node(node) (node)->debug()
#define cgraph_get_node(decl) cgraph_node::get(decl)
@@ -619,6 +676,7 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
#define cgraph_n_nodes symtab->cgraph_count
#define cgraph_max_uid symtab->cgraph_max_uid
#define varpool_get_node(decl) varpool_node::get(decl)
+#define dump_varpool_node(file, node) (node)->dump(file)
#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
(caller)->create_edge((callee), (call_stmt), (count), (freq))
@@ -674,6 +732,11 @@ static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
return node->get_alias_target();
}
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+ return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable);
+}
+
static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
{
return symtab->add_cgraph_insertion_hook(hook, data);
@@ -731,6 +794,13 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
template <>
template <>
+inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
+{
+ return gs->code == GIMPLE_GOTO;
+}
+
+template <>
+template <>
inline bool is_a_helper<const greturn *>::test(const_gimple gs)
{
return gs->code == GIMPLE_RETURN;
@@ -766,6 +836,16 @@ static inline const gcall *as_a_const_gcall(const_gimple stmt)
return as_a<const gcall *>(stmt);
}
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+ return as_a<ggoto *>(stmt);
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+ return as_a<const ggoto *>(stmt);
+}
+
static inline gphi *as_a_gphi(gimple stmt)
{
return as_a<gphi *>(stmt);
@@ -828,4 +908,9 @@ static inline void debug_gimple_stmt(const_gimple s)
#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
#endif
+#if BUILDING_GCC_VERSION >= 7000
+#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning) \
+ get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep)
+#endif
+
#endif
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index 12541126575b..8ff203ad4809 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -328,9 +328,9 @@ static enum tree_code get_op(tree *rhs)
op = LROTATE_EXPR;
/*
* This code limits the value of random_const to
- * the size of a wide int for the rotation
+ * the size of a long for the rotation
*/
- random_const &= HOST_BITS_PER_WIDE_INT - 1;
+ random_const %= TYPE_PRECISION(long_unsigned_type_node);
break;
}
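
The latent_entropy change reduces random_const modulo the precision of unsigned long instead of masking with HOST_BITS_PER_WIDE_INT - 1, keeping the rotation count below the operand width regardless of the host. A plain C illustration of why a rotate count has to be reduced before shifting (not GCC internals):

#include <limits.h>
#include <stdio.h>

/* Shifting by the full operand width is undefined behaviour, so the
 * rotate count has to be reduced before it is used. */
static unsigned long rotl(unsigned long v, unsigned int n)
{
	unsigned int bits = sizeof(v) * CHAR_BIT;

	n %= bits;			/* keep n within [0, bits) */
	if (n == 0)
		return v;
	return (v << n) | (v >> (bits - n));
}

int main(void)
{
	/* same result as rotating by 1, whatever the width of long */
	printf("%lx\n", rotl(0x1UL, 65));
	return 0;
}
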
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index ee47924aef0d..827161bc269c 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -117,7 +117,7 @@ destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
conn = &efw->in_conn;
amdtp_stream_destroy(stream);
- cmp_connection_destroy(&efw->out_conn);
+ cmp_connection_destroy(conn);
}
static int
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index 4ad3bd7fd445..f1657a4e0621 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -343,7 +343,7 @@ int snd_tscm_stream_init_duplex(struct snd_tscm *tscm)
if (err < 0)
amdtp_stream_destroy(&tscm->rx_stream);
- return 0;
+ return err;
}
/* At bus reset, streaming is stopped and some registers are clear. */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9448daff9d8b..7d660ee1d5e8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2230,6 +2230,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -6983,6 +6984,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index efe3a44658d5..4576f987a4a5 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -561,9 +561,9 @@ static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
nau8825_xtalk_backup(nau8825);
/* Config IIS as master to output signal by codec */
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
- (0x2 << NAU8825_I2S_DRV_SFT) | 0x1);
+ (0x2 << NAU8825_I2S_LRC_DIV_SFT) | 0x1);
/* Ramp up headphone volume to 0dB to get better performance and
* avoid pop noise in headphone.
*/
@@ -657,7 +657,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
/* Recover default value for IIS */
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
/* Restore value of specific register for cross talk */
nau8825_xtalk_restore(nau8825);
@@ -2006,7 +2006,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
/* FLL pre-scaler */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
- NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
+ NAU8825_FLL_REF_DIV_MASK,
+ fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
/* select divided VCO input */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 5d1704e73241..514fd13c2f46 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -137,7 +137,8 @@
#define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT)
/* FLL4 (0x07) */
-#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
+#define NAU8825_FLL_REF_DIV_SFT 10
+#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT)
/* FLL5 (0x08) */
#define NAU8825_FLL_PDB_DAC_EN (0x1 << 15)
@@ -247,8 +248,8 @@
/* I2S_PCM_CTRL2 (0x1d) */
#define NAU8825_I2S_TRISTATE (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
-#define NAU8825_I2S_DRV_SFT 12
-#define NAU8825_I2S_DRV_MASK (0x3 << NAU8825_I2S_DRV_SFT)
+#define NAU8825_I2S_LRC_DIV_SFT 12
+#define NAU8825_I2S_LRC_DIV_MASK (0x3 << NAU8825_I2S_LRC_DIV_SFT)
#define NAU8825_I2S_MS_SFT 3
#define NAU8825_I2S_MS_MASK (1 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_MS_MASTER (1 << NAU8825_I2S_MS_SFT)
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 10c2a564a715..1ac96ef9ee20 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
}
+ regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
+ RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
+
if (rt5645->pdata.jd_invert) {
regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 8877b74b0510..bb94d50052d7 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
{ 108, 0x00 }, { 109, 0x00 },
};
+static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AIC3X_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config aic3x_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
.max_register = DAC_ICC_ADJ,
.reg_defaults = aic3x_reg,
.num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+
+ .volatile_reg = aic3x_volatile_reg,
+
.cache_type = REGCACHE_RBTREE,
};
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 593b7d1aed46..d72ccef9e238 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1551,7 +1551,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
const struct wmfw_region *region;
const struct wm_adsp_region *mem;
const char *region_name;
- char *file, *text;
+ char *file, *text = NULL;
struct wm_adsp_buf *buf;
unsigned int reg;
int regions = 0;
@@ -1700,10 +1700,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
regions, le32_to_cpu(region->len), offset,
region_name);
+ if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+ firmware->size) {
+ adsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, regions, region_name,
+ le32_to_cpu(region->len), firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
if (text) {
memcpy(text, region->data, le32_to_cpu(region->len));
adsp_info(dsp, "%s: %s\n", file, text);
kfree(text);
+ text = NULL;
}
if (reg) {
@@ -1748,6 +1759,7 @@ out_fw:
regmap_async_complete(regmap);
wm_adsp_buf_free(&buf_list);
release_firmware(firmware);
+ kfree(text);
out:
kfree(file);
@@ -2233,6 +2245,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
}
if (reg) {
+ if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+ firmware->size) {
+ adsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, blocks, region_name,
+ le32_to_cpu(blk->len),
+ firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
buf = wm_adsp_buf_alloc(blk->data,
le32_to_cpu(blk->len),
&buf_list);
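
The wm_adsp hunks validate each region or block length against the firmware image size before copying, and free the scratch text buffer on every exit path. Checking a length field read from an untrusted blob against the space actually remaining, header included, is the core pattern; a simplified sketch (the header layout below is invented, not the wmfw format):

#include <stdint.h>
#include <stdio.h>

struct region_hdr {
	uint32_t len;		/* declared length of the data that follows */
};

/* Return 0 when a region at offset 'pos' fits in a blob of 'total' bytes;
 * the checks are ordered so the size_t arithmetic cannot wrap. */
static int region_fits(size_t pos, const struct region_hdr *hdr, size_t total)
{
	if (pos > total || total - pos < sizeof(*hdr))
		return -1;	/* not even room for the header itself */
	if (hdr->len > total - pos - sizeof(*hdr))
		return -1;	/* declared length runs past the end of file */
	return 0;
}

int main(void)
{
	struct region_hdr good = { .len = 16 };
	struct region_hdr bad = { .len = 4096 };

	printf("good fits: %d\n", region_fits(8, &good, 64) == 0);
	printf("bad fits:  %d\n", region_fits(8, &bad, 64) == 0);
	return 0;
}
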
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 2998954a1c74..bdf8398cbc81 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -681,22 +681,19 @@ static int dw_i2s_probe(struct platform_device *pdev)
}
if (!pdata) {
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret == -EPROBE_DEFER) {
- dev_err(&pdev->dev,
- "failed to register PCM, deferring probe\n");
- return ret;
- } else if (ret) {
- dev_err(&pdev->dev,
- "Could not register DMA PCM: %d\n"
- "falling back to PIO mode\n", ret);
+ if (irq >= 0) {
ret = dw_pcm_register(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not register PIO PCM: %d\n",
+ dev->use_pio = true;
+ } else {
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+ 0);
+ dev->use_pio = false;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "could not register pcm: %d\n",
ret);
- goto err_clk_disable;
- }
+ goto err_clk_disable;
}
}
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 50349437d961..fde08660b63b 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -224,6 +224,12 @@ struct fsl_ssi_soc_data {
* @dbg_stats: Debugging statistics
*
* @soc: SoC specific data
+ *
+ * @fifo_watermark: the FIFO watermark setting. Notifies DMA when
+ * there are @fifo_watermark or fewer words in the TX FIFO or
+ * @fifo_watermark or more empty words in the RX FIFO.
+ * @dma_maxburst: maximum number of words to transfer in one go. So far
+ * this is always the same as @fifo_watermark.
*/
struct fsl_ssi_private {
struct regmap *regs;
@@ -263,6 +269,9 @@ struct fsl_ssi_private {
const struct fsl_ssi_soc_data *soc;
struct device *dev;
+
+ u32 fifo_watermark;
+ u32 dma_maxburst;
};
/*
@@ -1051,21 +1060,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
regmap_write(regs, CCSR_SSI_SRCR, srcr);
regmap_write(regs, CCSR_SSI_SCR, scr);
- /*
- * Set the watermark for transmit FIFI 0 and receive FIFO 0. We don't
- * use FIFO 1. We program the transmit water to signal a DMA transfer
- * if there are only two (or fewer) elements left in the FIFO. Two
- * elements equals one frame (left channel, right channel). This value,
- * however, depends on the depth of the transmit buffer.
- *
- * We set the watermark on the same level as the DMA burstsize. For
- * fiq it is probably better to use the biggest possible watermark
- * size.
- */
- if (ssi_private->use_dma)
- wm = ssi_private->fifo_depth - 2;
- else
- wm = ssi_private->fifo_depth;
+ wm = ssi_private->fifo_watermark;
regmap_write(regs, CCSR_SSI_SFCSR,
CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
@@ -1373,12 +1368,8 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
PTR_ERR(ssi_private->baudclk));
- /*
- * We have burstsize be "fifo_depth - 2" to match the SSI
- * watermark setting in fsl_ssi_startup().
- */
- ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
- ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+ ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
+ ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
@@ -1543,6 +1534,47 @@ static int fsl_ssi_probe(struct platform_device *pdev)
/* Older 8610 DTs didn't have the fifo-depth property */
ssi_private->fifo_depth = 8;
+ /*
+ * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
+ * use FIFO 1 but set the watermark appropriately nonetheless.
+ * We program the transmit watermark to signal a DMA transfer
+ * if there are N elements left in the FIFO. For chips with 15-deep
+ * FIFOs, set the watermark to 8. This allows the SSI to operate at a
+ * high data rate without channel slipping. Behavior is unchanged
+ * for the older chips with a FIFO depth of only 8. A value of 4
+ * might be appropriate for the older chips, but is left at
+ * fifo_depth - 2 until somebody has a chance to test it.
+ *
+ * We set the watermark to the same level as the DMA burst size. For
+ * FIQ it is probably better to use the biggest possible watermark
+ * size.
+ */
+ switch (ssi_private->fifo_depth) {
+ case 15:
+ /*
+ * Two samples are not enough when running at high data
+ * rates (e.g. 48 kHz @ 16 bits/channel, 16 channels).
+ * Eight seems to split things evenly and leaves enough time
+ * for the DMA to fill the FIFO before it over- or
+ * under-runs.
+ */
+ ssi_private->fifo_watermark = 8;
+ ssi_private->dma_maxburst = 8;
+ break;
+ case 8:
+ default:
+ /*
+ * Maintain the old behavior for older chips.
+ * Keeping it the same because I don't have an older
+ * board to test with.
+ * I suspect this could be changed to leave some more
+ * space in the FIFO.
+ */
+ ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
+ ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+ break;
+ }
+
dev_set_drvdata(&pdev->dev, ssi_private);
if (ssi_private->soc->imx) {
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 507a86a5eafe..8d2fb2d6f532 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -142,7 +142,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
* for Jack detection and button press
*/
ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
- 0,
+ 48000 * 512,
SND_SOC_CLOCK_IN);
if (!ret) {
if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
@@ -825,10 +825,20 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
if (IS_ERR(priv->mclk)) {
+ ret_val = PTR_ERR(priv->mclk);
+
dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(priv->mclk));
- return PTR_ERR(priv->mclk);
+ "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+ ret_val);
+
+ /*
+ * Fall back to bit-clock usage for -ENOENT (clock not
+ * available, likely due to missing dependencies); bail
+ * out for all other errors, including -EPROBE_DEFER.
+ */
+ if (ret_val != -ENOENT)
+ return ret_val;
+ byt_rt5640_quirk &= ~BYT_RT5640_MCLK_EN;
}
}
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 84b5101e6ca6..6c6b63a6b338 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -180,6 +180,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
snd_pcm_set_sync(substream);
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+ if (!mconfig)
+ return -EINVAL;
+
skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
return 0;
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 8fc3178bc79c..b30bd384c8d3 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -515,6 +515,9 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
+
+ if (ctx->dsp->fw)
+ release_firmware(ctx->dsp->fw);
skl_clear_module_table(ctx->dsp);
skl_freeup_uuid_list(ctx);
skl_ipc_free(&ctx->ipc);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 4bd68de76130..99b5b0835c1e 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1030,10 +1030,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
return -ENOMEM;
ret = snd_ctl_add(card, kctrl);
- if (ret < 0) {
- snd_ctl_free_one(kctrl);
+ if (ret < 0)
return ret;
- }
cfg->update = update;
cfg->card = card;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f1901bb1466e..baa1afa41e3d 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1748,6 +1748,7 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
component->init = aux_dev->init;
component->auxiliary = 1;
+ list_add(&component->card_aux_list, &card->aux_comp_list);
return 0;
@@ -1758,16 +1759,14 @@ err_defer:
static int soc_probe_aux_devices(struct snd_soc_card *card)
{
- struct snd_soc_component *comp;
+ struct snd_soc_component *comp, *tmp;
int order;
int ret;
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
- list_for_each_entry(comp, &card->component_dev_list, card_list) {
- if (!comp->auxiliary)
- continue;
-
+ list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
+ card_aux_list) {
if (comp->driver->probe_order == order) {
ret = soc_probe_component(card, comp);
if (ret < 0) {
@@ -1776,6 +1775,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
comp->name, ret);
return ret;
}
+ list_del(&comp->card_aux_list);
}
}
}
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e7a1eaa2772f..6aba14009c92 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2184,9 +2184,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
+ break;
}
out:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 65670b2b408c..fbfb1fab88d5 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -514,13 +514,12 @@ static void remove_widget(struct snd_soc_component *comp,
== SND_SOC_TPLG_TYPE_MIXER)
kfree(kcontrol->tlv.p);
- snd_ctl_remove(card, kcontrol);
-
/* Private value is used as struct soc_mixer_control
* for volume mixers or soc_bytes_ext for bytes
* controls.
*/
kfree((void *)kcontrol->private_value);
+ snd_ctl_remove(card, kcontrol);
}
kfree(w->kcontrol_news);
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 15d1d5c63c3c..c90607ebe155 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
if (unlikely(atomic_read(&ep->chip->shutdown)))
goto exit_clear;
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
+
if (usb_pipeout(ep->pipe)) {
retire_outbound_urb(ep, ctx);
/* can be stopped during retire callback */
@@ -534,6 +537,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
alive, ep->ep_num);
clear_bit(EP_FLAG_STOPPING, &ep->flags);
+ ep->data_subs = NULL;
+ ep->sync_slave = NULL;
+ ep->retire_data_urb = NULL;
+ ep->prepare_data_urb = NULL;
+
return 0;
}
@@ -912,9 +920,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
/**
* snd_usb_endpoint_start: start an snd_usb_endpoint
*
- * @ep: the endpoint to start
- * @can_sleep: flag indicating whether the operation is executed in
- * non-atomic context
+ * @ep: the endpoint to start
*
* A call to this function will increment the use count of the endpoint.
* In case it is not already running, the URBs for this endpoint will be
@@ -924,7 +930,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
*
* Returns an error if the URB submission failed, 0 in all other cases.
*/
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
{
int err;
unsigned int i;
@@ -938,8 +944,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
/* just to be sure */
deactivate_urbs(ep, false);
- if (can_sleep)
- wait_clear_urbs(ep);
ep->active_mask = 0;
ep->unlink_mask = 0;
@@ -1020,10 +1024,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
if (--ep->use_count == 0) {
deactivate_urbs(ep, false);
- ep->data_subs = NULL;
- ep->sync_slave = NULL;
- ep->retire_data_urb = NULL;
- ep->prepare_data_urb = NULL;
set_bit(EP_FLAG_STOPPING, &ep->flags);
}
}
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 6428392d8f62..584f295d7c77 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -18,7 +18,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep);
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 34c6d4f2c0b6..9aa5b1855481 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -218,7 +218,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
}
}
-static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
+static int start_endpoints(struct snd_usb_substream *subs)
{
int err;
@@ -231,7 +231,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
ep->data_subs = subs;
- err = snd_usb_endpoint_start(ep, can_sleep);
+ err = snd_usb_endpoint_start(ep);
if (err < 0) {
clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
return err;
@@ -260,7 +260,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
ep->sync_slave = subs->data_endpoint;
- err = snd_usb_endpoint_start(ep, can_sleep);
+ err = snd_usb_endpoint_start(ep);
if (err < 0) {
clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
return err;
@@ -850,7 +850,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
/* for playback, submit the URBs now; otherwise, the first hwptr_done
* updates for all URBs would happen at the same time when starting */
if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- ret = start_endpoints(subs, true);
+ ret = start_endpoints(subs);
unlock:
snd_usb_unlock_shutdown(subs->stream->chip);
@@ -1666,7 +1666,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err = start_endpoints(subs, false);
+ err = start_endpoints(subs);
if (err < 0)
return err;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index b3fd2382fdd9..eb4b9f7a571e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1135,6 +1135,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 3284bb14ae78..8aad81151d50 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -213,6 +213,9 @@ static int get_value(struct parse_opt_ctx_t *p,
else
err = get_arg(p, opt, flags, (const char **)opt->value);
+ if (opt->set)
+ *(bool *)opt->set = true;
+
/* PARSE_OPT_NOEMPTY: Allow NULL but disallow empty string. */
if (opt->flags & PARSE_OPT_NOEMPTY) {
const char *val = *(const char **)opt->value;
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index 8866ac438b34..11c3be3bcce7 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -137,6 +137,11 @@ struct option {
{ .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
.value = check_vtype(v, const char **), (a), .help = (h), \
.flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
+#define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \
+ { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
+ .value = check_vtype(v, const char **), (a), .help = (h), \
+ .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d), \
+ .set = check_vtype(os, bool *)}
#define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
#define OPT_DATE(s, l, v, h) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
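
Together with the get_value() change above (which sets *opt->set whenever the option is parsed), the new OPT_STRING_OPTARG_SET macro lets a tool distinguish "option not given" from "option given with its default argument". A hypothetical use, built against tools/lib/subcmd/parse-options.h (include path as used from perf; the option name and variables are made up for illustration):

#include <stdbool.h>
#include <subcmd/parse-options.h>

static const char *mode = NULL; /* value of --mode, or the default "auto" */
static bool mode_set = false;   /* true only if --mode appeared on the command line */

static struct option options[] = {
        OPT_STRING_OPTARG_SET('m', "mode", &mode, &mode_set,
                              "mode", "select the output mode", "auto"),
        OPT_END()
};

After parse_options() runs, 'mode' holds either the user's argument or "auto", and 'mode_set' records whether --mode was present at all.
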
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index f1ce60065258..ec30c2fcbac0 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
trace_seq_printf(s, "%lld ", val);
if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
- trace_seq_printf(s, "[%lld] ", val);
+ trace_seq_printf(s, "[%d] ", (int) val);
if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
write_state(s, val);
@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
trace_seq_printf(s, "%lld", val);
if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
- trace_seq_printf(s, " [%lld]", val);
+ trace_seq_printf(s, " [%d]", (int) val);
return 0;
}
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 27fc3617c6a4..5054d9147f0f 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -430,6 +430,10 @@ that gets then processed, possibly via a perf script, to decide if that
particular perf.data snapshot should be kept or not.
Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
+The reason for the latter two is to reduce the data file switching
+overhead. You can still switch them on with:
+
+ --switch-output --no-no-buildid --no-no-buildid-cache
--dry-run::
Parse options then exit. --dry-run can be used to detect errors in cmdline
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8fc24824705e..8bb16aa9d661 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -704,9 +704,9 @@ install-tests: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
-install-bin: install-tools install-tests
+install-bin: install-tools install-tests install-traceevent-plugins
-install: install-bin try-install-man install-traceevent-plugins
+install: install-bin try-install-man
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 35a02f8e5a4a..915869e00d86 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -655,7 +655,6 @@ static const struct {
{ "__GFP_RECLAIM", "R" },
{ "__GFP_DIRECT_RECLAIM", "DR" },
{ "__GFP_KSWAPD_RECLAIM", "KR" },
- { "__GFP_OTHER_NODE", "ON" },
};
static size_t max_gfp_len;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 74d6a035133a..4ec10e9427d9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1405,7 +1405,7 @@ static bool dry_run;
* perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
* using pipes, etc.
*/
-struct option __record_options[] = {
+static struct option __record_options[] = {
OPT_CALLBACK('e', "event", &record.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
@@ -1636,7 +1636,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
* overhead. Still generate buildid if they are required
* explicitly using
*
- * perf record --signal-trigger --no-no-buildid \
+ * perf record --switch-output --no-no-buildid \
* --no-no-buildid-cache
*
* Following code equals to:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index d53e706a6f17..5b134b0d1ff3 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -209,6 +209,7 @@ struct perf_sched {
u64 skipped_samples;
const char *time_str;
struct perf_time_interval ptime;
+ struct perf_time_interval hist_time;
};
/* per thread run time data */
@@ -2460,6 +2461,11 @@ static int timehist_sched_change_event(struct perf_tool *tool,
timehist_print_sample(sched, sample, &al, thread, t);
out:
+ if (sched->hist_time.start == 0 && t >= ptime->start)
+ sched->hist_time.start = t;
+ if (ptime->end == 0 || t <= ptime->end)
+ sched->hist_time.end = t;
+
if (tr) {
/* time of this sched_switch event becomes last time task seen */
tr->last_time = sample->time;
@@ -2624,6 +2630,7 @@ static void timehist_print_summary(struct perf_sched *sched,
struct thread *t;
struct thread_runtime *r;
int i;
+ u64 hist_time = sched->hist_time.end - sched->hist_time.start;
memset(&totals, 0, sizeof(totals));
@@ -2665,7 +2672,7 @@ static void timehist_print_summary(struct perf_sched *sched,
totals.sched_count += r->run_stats.n;
printf(" CPU %2d idle for ", i);
print_sched_time(r->total_run_time, 6);
- printf(" msec\n");
+ printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
} else
printf(" CPU %2d idle entire time window\n", i);
}
@@ -2701,12 +2708,16 @@ static void timehist_print_summary(struct perf_sched *sched,
printf("\n"
" Total number of unique tasks: %" PRIu64 "\n"
- "Total number of context switches: %" PRIu64 "\n"
- " Total run time (msec): ",
+ "Total number of context switches: %" PRIu64 "\n",
totals.task_count, totals.sched_count);
+ printf(" Total run time (msec): ");
print_sched_time(totals.total_run_time, 2);
printf("\n");
+
+ printf(" Total scheduling time (msec): ");
+ print_sched_time(hist_time, 2);
+ printf(" (x %d)\n", sched->max_cpu);
}
typedef int (*sched_handler)(struct perf_tool *tool,
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index d281ae2b54e8..6a6f44dd594b 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module)
/* A file path -- this is an offline module */
if (module && strchr(module, '/'))
- return machine__findnew_module_map(host_machine, 0, module);
+ return dso__new_map(module);
if (!module)
module = "kernel";
@@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0 &&
module[pos->dso->short_name_len - 2] == '\0') {
+ map__get(pos);
return pos;
}
}
@@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user)
return kernel_get_module_map(target);
}
-static void put_target_map(struct map *map, bool user)
-{
- if (map && user) {
- /* Only the user map needs to be released */
- map__put(map);
- }
-}
-
-
static int convert_exec_to_group(const char *exec, char **result)
{
char *ptr1, *ptr2, *exec_copy;
@@ -268,21 +260,6 @@ static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
}
/*
- * NOTE:
- * '.gnu.linkonce.this_module' section of kernel module elf directly
- * maps to 'struct module' from linux/module.h. This section contains
- * actual module name which will be used by kernel after loading it.
- * But, we cannot use 'struct module' here since linux/module.h is not
- * exposed to user-space. Offset of 'name' has remained same from long
- * time, so hardcoding it here.
- */
-#ifdef __LP64__
-#define MOD_NAME_OFFSET 24
-#else
-#define MOD_NAME_OFFSET 12
-#endif
-
-/*
* @module can be module name of module file path. In case of path,
* inspect elf and find out what is actual module name.
* Caller has to free mod_name after using it.
@@ -296,6 +273,7 @@ static char *find_module_name(const char *module)
Elf_Data *data;
Elf_Scn *sec;
char *mod_name = NULL;
+ int name_offset;
fd = open(module, O_RDONLY);
if (fd < 0)
@@ -317,7 +295,21 @@ static char *find_module_name(const char *module)
if (!data || !data->d_buf)
goto ret_err;
- mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET);
+ /*
+ * NOTE:
+ * The '.gnu.linkonce.this_module' section of a kernel module ELF maps
+ * directly to 'struct module' from linux/module.h. This section
+ * contains the actual module name, which will be used by the kernel
+ * after loading it. But we cannot use 'struct module' here since
+ * linux/module.h is not exposed to user-space. The offset of 'name'
+ * has remained the same for a long time, so it is hardcoded here.
+ */
+ if (ehdr.e_ident[EI_CLASS] == ELFCLASS32)
+ name_offset = 12;
+ else /* expect ELFCLASS64 by default */
+ name_offset = 24;
+
+ mod_name = strdup((char *)data->d_buf + name_offset);
ret_err:
elf_end(elf);
@@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
}
out:
- put_target_map(map, uprobes);
+ map__put(map);
return ret;
}
@@ -618,6 +610,67 @@ error:
return ret ? : -ENOENT;
}
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+ struct map *map, unsigned long offs)
+{
+ struct symbol *sym;
+ u64 addr = tp->address + tp->offset - offs;
+
+ sym = map__find_symbol(map, addr);
+ if (!sym)
+ return -ENOENT;
+
+ if (strcmp(sym->name, tp->symbol)) {
+ /* If we have no realname, use symbol for it */
+ if (!tp->realname)
+ tp->realname = tp->symbol;
+ else
+ free(tp->symbol);
+ tp->symbol = strdup(sym->name);
+ if (!tp->symbol)
+ return -ENOMEM;
+ }
+ tp->offset = addr - sym->start;
+ tp->address -= offs;
+
+ return 0;
+}
+
+/*
+ * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes
+ * functions and generates new symbols with suffixes such as
+ * .constprop.N or .isra.N. Since those symbols are not recorded in
+ * DWARF, we have to find the correct generated symbols in the offline
+ * ELF binary. For an online kernel or uprobes we don't need this
+ * because those are rebased on _text or are already section-relative.
+ */
+static int
+post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *pathname)
+{
+ struct map *map;
+ unsigned long stext = 0;
+ int i, ret = 0;
+
+ /* Prepare a map for offline binary */
+ map = dso__new_map(pathname);
+ if (!map || get_text_start_address(pathname, &stext) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", pathname);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, stext);
+ if (ret < 0)
+ break;
+ }
+ map__put(map);
+
+ return ret;
+}
+
static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *exec)
{
@@ -645,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
return ret;
}
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
- int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module,
+ struct debuginfo *dinfo)
{
+ Dwarf_Addr text_offs = 0;
int i, ret = 0;
char *mod_name = NULL;
+ struct map *map;
if (!module)
return 0;
- mod_name = find_module_name(module);
+ map = get_target_map(module, false);
+ if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", module);
+ return -EINVAL;
+ }
+ mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, (unsigned long)text_offs);
+ if (ret < 0)
+ break;
tevs[i].point.module =
strdup(mod_name ? mod_name : module);
if (!tevs[i].point.module) {
@@ -666,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
}
free(mod_name);
+ map__put(map);
+
return ret;
}
@@ -679,7 +747,8 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
/* Skip post process if the target is an offline kernel */
if (symbol_conf.ignore_vmlinux_buildid)
- return 0;
+ return post_process_offline_probe_trace_events(tevs, ntevs,
+ symbol_conf.vmlinux_name);
reloc_sym = kernel_get_ref_reloc_sym();
if (!reloc_sym) {
@@ -722,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
static int post_process_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event *tevs,
int ntevs, const char *module,
- bool uprobe)
+ bool uprobe, struct debuginfo *dinfo)
{
int ret;
@@ -730,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
else if (module)
/* Currently ref_reloc_sym based probe is not for drivers */
- ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+ ret = post_process_module_probe_trace_events(tevs, ntevs,
+ module, dinfo);
else
ret = post_process_kernel_probe_trace_events(tevs, ntevs);
@@ -774,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
}
- debuginfo__delete(dinfo);
-
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("Found %d probe_trace_events.\n", ntevs);
ret = post_process_probe_trace_events(pev, *tevs, ntevs,
- pev->target, pev->uprobes);
+ pev->target, pev->uprobes, dinfo);
if (ret < 0 || ret == ntevs) {
+ pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
+ ntevs = 0;
}
- if (ret != ntevs)
- return ret < 0 ? ret : ntevs;
- ntevs = 0;
- /* Fall through */
}
+ debuginfo__delete(dinfo);
+
if (ntevs == 0) { /* No error but failed to find probe point. */
pr_warning("Probe point '%s' not found.\n",
synthesize_perf_probe_point(&pev->point));
return -ENOENT;
- }
- /* Error path : ntevs < 0 */
- pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
- if (ntevs < 0) {
+ } else if (ntevs < 0) {
+ /* Error path : ntevs < 0 */
+ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
if (ntevs == -EBADF)
pr_warning("Warning: No dwarf info found in the vmlinux - "
"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
@@ -2869,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
out:
- put_target_map(map, pev->uprobes);
+ map__put(map);
free(syms);
return ret;
@@ -3362,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
return ret;
/* Get a symbol map */
- if (user)
- map = dso__new_map(target);
- else
- map = kernel_get_module_map(target);
+ map = get_target_map(target, user);
if (!map) {
pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
return -EINVAL;
@@ -3397,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
}
end:
- if (user) {
- map__put(map);
- }
+ map__put(map);
exit_probe_symbol_maps();
return ret;
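
The find_module_name() change above stops deriving the 'name' offset inside '.gnu.linkonce.this_module' from how the tool itself was compiled (__LP64__) and instead reads the ELF class of the module file being inspected. A small stand-alone sketch of that decision, using only the ELF identification bytes; the helper and program structure are hypothetical, not perf's code:

#include <elf.h>        /* EI_NIDENT, EI_CLASS, ELFCLASS32 */
#include <stdio.h>

/*
 * Offset of the module name inside '.gnu.linkonce.this_module':
 * 12 bytes into 'struct module' on 32-bit kernels, 24 on 64-bit ones,
 * chosen from the module's own ELF class rather than the host build.
 */
static int module_name_offset(const unsigned char e_ident[EI_NIDENT])
{
        return e_ident[EI_CLASS] == ELFCLASS32 ? 12 : 24;
}

int main(int argc, char **argv)
{
        unsigned char ident[EI_NIDENT];
        FILE *f;

        if (argc < 2)
                return 1;
        f = fopen(argv[1], "rb");
        if (!f)
                return 1;
        if (fread(ident, 1, sizeof(ident), f) != sizeof(ident)) {
                fclose(f);
                return 1;
        }
        printf("name offset in .gnu.linkonce.this_module: %d\n",
               module_name_offset(ident));
        fclose(f);
        return 0;
}
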
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index df4debe564da..0d9d6e0803b8 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
}
/* For the kernel module, we need a special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset)
{
int n, i;
Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
if (!shdr)
return -ENOENT;
*offs = shdr->sh_addr;
+ if (adjust_offset)
+ *offs -= shdr->sh_offset;
}
}
return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
- bool reloc = false;
-retry:
+ /* We always need to relocate the address for aranges */
+ if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+ addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
- addr += baseaddr;
- reloc = true;
- goto retry;
- }
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index f1d8558f498e..2956c5198652 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
struct perf_probe_point *ppt);
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset);
+
/* Find a line range */
int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 99400b0e8f2a..adbc6c02c3aa 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -537,6 +537,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
break;
} else {
int n = namesz + descsz;
+
+ if (n > (int)sizeof(bf)) {
+ n = sizeof(bf);
+ pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
+ __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
+ }
if (read(fd, bf, n) != n)
break;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 71b05891a6a1..831022b12848 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -90,7 +90,7 @@ ifdef INSTALL_PATH
done;
@# Ask all targets to emit their test scripts
- echo "#!/bin/bash" > $(ALL_SCRIPT)
+ echo "#!/bin/sh" > $(ALL_SCRIPT)
echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
index 92e627adf354..6d58cca8e235 100755
--- a/tools/testing/selftests/bpf/test_kmod.sh
+++ b/tools/testing/selftests/bpf/test_kmod.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
SRC_TREE=../../../../
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index b13fed534d76..9f7bd1915c21 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}
-static int sched_next_online(int pid, int next_to_try)
+static int sched_next_online(int pid, int *next_to_try)
{
cpu_set_t cpuset;
+ int next = *next_to_try;
+ int ret = -1;
- if (next_to_try == nr_cpus)
- return -1;
-
- while (next_to_try < nr_cpus) {
+ while (next < nr_cpus) {
CPU_ZERO(&cpuset);
- CPU_SET(next_to_try++, &cpuset);
- if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+ CPU_SET(next++, &cpuset);
+ if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+ ret = 0;
break;
+ }
}
- return next_to_try;
+ *next_to_try = next;
+ return ret;
}
/* Size of the LRU map is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
{
unsigned long long key, value[nr_cpus];
int lru_map_fd, expected_map_fd;
+ int next_cpu = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
if (map_flags & BPF_F_NO_COMMON_LRU)
lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned int batch_size;
unsigned int map_size;
+ int next_cpu = 0;
if (map_flags & BPF_F_NO_COMMON_LRU)
/* The percpu lru list (i.e each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned int batch_size;
unsigned int map_size;
+ int next_cpu = 0;
if (map_flags & BPF_F_NO_COMMON_LRU)
/* The percpu lru list (i.e each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned int batch_size;
unsigned int map_size;
+ int next_cpu = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned long long key, value[nr_cpus];
unsigned long long end_key;
+ int next_cpu = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
if (map_flags & BPF_F_NO_COMMON_LRU)
lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
static void test_lru_sanity5(int map_type, int map_flags)
{
unsigned long long key, value[nr_cpus];
- int next_sched_cpu = 0;
+ int next_cpu = 0;
int map_fd;
- int i;
if (map_flags & BPF_F_NO_COMMON_LRU)
return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
key = 0;
assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
- for (i = 0; i < nr_cpus; i++) {
+ while (sched_next_online(0, &next_cpu) != -1) {
pid_t pid;
pid = fork();
if (pid == 0) {
- next_sched_cpu = sched_next_online(0, next_sched_cpu);
- if (next_sched_cpu != -1)
- do_test_lru_sanity5(key, map_fd);
+ do_test_lru_sanity5(key, map_fd);
exit(0);
} else if (pid == -1) {
- printf("couldn't spawn #%d process\n", i);
+ printf("couldn't spawn process to test key:%llu\n",
+ key);
exit(1);
} else {
int status;
- /* It is mostly redundant and just allow the parent
- * process to update next_shced_cpu for the next child
- * process
- */
- next_sched_cpu = sched_next_online(pid, next_sched_cpu);
-
assert(waitpid(pid, &status, 0) == pid);
assert(status == 0);
key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
}
close(map_fd);
+ /* At least one key should be tested */
+ assert(key > 0);
printf("Pass\n");
}
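
The reworked sched_next_online() above separates the success/failure result from the CPU cursor, which it now advances through a pointer so the test can keep walking the online CPUs until sched_setaffinity() stops succeeding. A stand-alone sketch of the same pattern (the helper name and main() loop are made up for illustration):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Pin the calling process to the next available CPU at or after *next.
 * The cursor is advanced through the pointer so the caller can keep
 * walking the CPUs; the return value only says whether one was found.
 */
static int pin_to_next_cpu(int *next, int nr_cpus)
{
        cpu_set_t set;
        int cpu = *next;
        int ret = -1;

        while (cpu < nr_cpus) {
                CPU_ZERO(&set);
                CPU_SET(cpu++, &set);
                if (!sched_setaffinity(0, sizeof(set), &set)) {
                        ret = 0;
                        break;
                }
        }
        *next = cpu;
        return ret;
}

int main(void)
{
        int next = 0;
        int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);

        while (pin_to_next_cpu(&next, nr_cpus) == 0)
                printf("now running on CPU %d\n", next - 1);
        return 0;
}
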
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index c09a682df56a..16058bbea7a8 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
echo "--------------------"
echo "running socket test"
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index c22860ab9733..30e1ac62e8cb 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
FAIL_IF(ebb_event_enable(&event));
- mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+ mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
mtspr(SPRN_PMC5, 0);
mtspr(SPRN_PMC6, 0);
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index bdd58c78902e..df9e0a0cdf29 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1367,7 +1367,7 @@ void run_tests_once(void)
tracing_off();
close_test_fds();
- printf("test %2d PASSED (itertation %d)\n", test_nr, iteration_nr);
+ printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
dprintf1("======================\n\n");
}
iteration_nr++;
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 34e63cc4c572..14142faf040b 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+ asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
#else
static inline void wait_cycles(unsigned long long cycles)
{
@@ -81,6 +91,8 @@ extern unsigned ring_size;
/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 2e69ca812b4c..29b0d3920bfc 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -1,12 +1,13 @@
#!/bin/sh
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
#use last CPU for host. Why not the first?
#many devices tend to use cpu0 by default so
#it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
#run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
do
#Don't run guest and host on same CPU
#It actually works ok if using signalling
diff --git a/usr/Makefile b/usr/Makefile
index 17a513268325..0b87e71c00fc 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -5,8 +5,10 @@
klibcdirs:;
PHONY += klibcdirs
-suffix_y = $(CONFIG_INITRAMFS_COMPRESSION)
-AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/initramfs_data.cpio$(suffix_y)"
+suffix_y = $(subst $\",,$(CONFIG_INITRAMFS_COMPRESSION))
+datafile_y = initramfs_data.cpio$(suffix_y)
+AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+
# Generate builtin.o based on initramfs_data.o
obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
@@ -14,7 +16,7 @@ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
# initramfs_data.o contains the compressed initramfs_data.cpio image.
# The image is included using .incbin, a dependency which is not
# tracked automatically.
-$(obj)/initramfs_data.o: $(obj)/initramfs_data.cpio$(suffix_y) FORCE
+$(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE
#####
# Generate the initramfs cpio archive
@@ -38,10 +40,8 @@ endif
quiet_cmd_initfs = GEN $@
cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 \
- initramfs_data.cpio.lzma initramfs_data.cpio.xz \
- initramfs_data.cpio.lzo initramfs_data.cpio.lz4 \
- initramfs_data.cpio
+targets := $(datafile_y)
+
# do not try to update files included in initramfs
$(deps_initramfs): ;
@@ -51,6 +51,6 @@ $(deps_initramfs): klibcdirs
# 2) There are changes in which files are included (added or deleted)
# 3) If gen_init_cpio are newer than initramfs_data.cpio
# 4) arguments to gen_initramfs.sh changes
-$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
+$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
$(call if_changed,initfs)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a2dbbccbb6a3..6a084cd57b88 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -24,6 +24,7 @@
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
- vcpu->arch.timer_cpu.armed = false;
-
- WARN_ON(!kvm_timer_should_fire(vcpu));
/*
* If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
{
kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}
+
+/*
+ * On a VHE system, we only need to configure trapping of physical
+ * timer and counter accesses in EL0 and EL1 once, not on every world
+ * switch. The host kernel runs at EL2 with HCR_EL2.TGE == 1, which
+ * makes those bits have no effect on host kernel execution.
+ */
+void kvm_timer_init_vhe(void)
+{
+ /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
+ u32 cnthctl_shift = 10;
+ u64 val;
+
+ /*
+ * Disallow physical timer access for the guest.
+ * Physical counter access is allowed.
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+ val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+ write_sysreg(val, cnthctl_el2);
+}
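
kvm_timer_init_vhe() applies the usual "trap the physical timer, allow the physical counter" policy, but with the control bits shifted by 10 because with HCR_EL2.E2H == 1 the EL1PCEN/EL1PCTEN controls move up in CNTHCTL_EL2. A pure bit-manipulation sketch of that adjustment, assuming the conventional CNTHCTL_EL1PCTEN/CNTHCTL_EL1PCEN definitions at bits 0 and 1 (the shift itself comes from the hunk above):

#include <stdint.h>

#define CNTHCTL_EL1PCTEN        (1u << 0)       /* EL1 physical counter access (assumed bit) */
#define CNTHCTL_EL1PCEN         (1u << 1)       /* EL1 physical timer access (assumed bit)   */

/* On VHE, the same controls sit 10 bits higher in CNTHCTL_EL2. */
static uint64_t cnthctl_vhe_guest_policy(uint64_t val)
{
        const unsigned int shift = 10;

        val &= ~((uint64_t)CNTHCTL_EL1PCEN << shift);   /* trap the physical timer    */
        val |=  ((uint64_t)CNTHCTL_EL1PCTEN << shift);  /* allow the physical counter */
        return val;
}
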
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index 798866a8d875..63e28dd18bb0 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
/* Disable the virtual timer */
write_sysreg_el0(0, cntv_ctl);
- /* Allow physical timer/counter access for the host */
- val = read_sysreg(cnthctl_el2);
- val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
- write_sysreg(val, cnthctl_el2);
+ /*
+ * We don't need to do this for VHE since the host kernel runs in EL2
+ * with HCR_EL2.TGE == 1, which makes those bits have no impact.
+ */
+ if (!has_vhe()) {
+ /* Allow physical timer/counter access for the host */
+ val = read_sysreg(cnthctl_el2);
+ val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ write_sysreg(val, cnthctl_el2);
+ }
/* Clear cntvoff for the host */
write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
u64 val;
- /*
- * Disallow physical timer access for the guest
- * Physical counter access is allowed
- */
- val = read_sysreg(cnthctl_el2);
- val &= ~CNTHCTL_EL1PCEN;
- val |= CNTHCTL_EL1PCTEN;
- write_sysreg(val, cnthctl_el2);
+ /* Those bits are already configured at boot on a VHE system */
+ if (!has_vhe()) {
+ /*
+ * Disallow physical timer access for the guest
+ * Physical counter access is allowed
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~CNTHCTL_EL1PCEN;
+ val |= CNTHCTL_EL1PCTEN;
+ write_sysreg(val, cnthctl_el2);
+ }
if (timer->enabled) {
write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5114391b7e5a..c737ea0a310a 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- mutex_lock(&kvm->lock);
-
dist->ready = false;
dist->initialized = false;
kfree(dist->spis);
dist->nr_spis = 0;
-
- mutex_unlock(&kvm->lock);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
}
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
kvm_vgic_vcpu_destroy(vcpu);
}
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
+ __kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->lock);
+}
+
/**
* vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
* is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
ret = vgic_v2_map_resources(kvm);
else
ret = vgic_v3_map_resources(kvm);
+
+ if (ret)
+ __kvm_vgic_destroy(kvm);
+
out:
mutex_unlock(&kvm->lock);
return ret;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 9bab86757fa4..834137e7b83f 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
dist->ready = true;
out:
- if (ret)
- kvm_vgic_destroy(kvm);
return ret;
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 5c9f9745e6ca..e6b03fd8c374 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
dist->ready = true;
out:
- if (ret)
- kvm_vgic_destroy(kvm);
return ret;
}
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 52abac4bb6a2..6d2fcd6fcb25 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token == consumer->token) {
+ if (tmp->token == consumer->token || tmp == consumer) {
mutex_unlock(&lock);
module_put(THIS_MODULE);
return -EBUSY;
@@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token != consumer->token)
+ if (tmp != consumer)
continue;
list_for_each_entry(producer, &producers, node) {